// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"

#define ICE_PF_RESET_WAIT_COUNT	300
#define ICE_MAX_NETLIST_SIZE	10

static const char * const ice_link_mode_str_low[] = {
	[0] = "100BASE_TX",
	[1] = "100M_SGMII",
	[2] = "1000BASE_T",
	[3] = "1000BASE_SX",
	[4] = "1000BASE_LX",
	[5] = "1000BASE_KX",
	[6] = "1G_SGMII",
	[7] = "2500BASE_T",
	[8] = "2500BASE_X",
	[9] = "2500BASE_KX",
	[10] = "5GBASE_T",
	[11] = "5GBASE_KR",
	[12] = "10GBASE_T",
	[13] = "10G_SFI_DA",
	[14] = "10GBASE_SR",
	[15] = "10GBASE_LR",
	[16] = "10GBASE_KR_CR1",
	[17] = "10G_SFI_AOC_ACC",
	[18] = "10G_SFI_C2C",
	[19] = "25GBASE_T",
	[20] = "25GBASE_CR",
	[21] = "25GBASE_CR_S",
	[22] = "25GBASE_CR1",
	[23] = "25GBASE_SR",
	[24] = "25GBASE_LR",
	[25] = "25GBASE_KR",
	[26] = "25GBASE_KR_S",
	[27] = "25GBASE_KR1",
	[28] = "25G_AUI_AOC_ACC",
	[29] = "25G_AUI_C2C",
	[30] = "40GBASE_CR4",
	[31] = "40GBASE_SR4",
	[32] = "40GBASE_LR4",
	[33] = "40GBASE_KR4",
	[34] = "40G_XLAUI_AOC_ACC",
	[35] = "40G_XLAUI",
	[36] = "50GBASE_CR2",
	[37] = "50GBASE_SR2",
	[38] = "50GBASE_LR2",
	[39] = "50GBASE_KR2",
	[40] = "50G_LAUI2_AOC_ACC",
	[41] = "50G_LAUI2",
	[42] = "50G_AUI2_AOC_ACC",
	[43] = "50G_AUI2",
	[44] = "50GBASE_CP",
	[45] = "50GBASE_SR",
	[46] = "50GBASE_FR",
	[47] = "50GBASE_LR",
	[48] = "50GBASE_KR_PAM4",
	[49] = "50G_AUI1_AOC_ACC",
	[50] = "50G_AUI1",
	[51] = "100GBASE_CR4",
	[52] = "100GBASE_SR4",
	[53] = "100GBASE_LR4",
	[54] = "100GBASE_KR4",
	[55] = "100G_CAUI4_AOC_ACC",
	[56] = "100G_CAUI4",
	[57] = "100G_AUI4_AOC_ACC",
	[58] = "100G_AUI4",
	[59] = "100GBASE_CR_PAM4",
	[60] = "100GBASE_KR_PAM4",
	[61] = "100GBASE_CP2",
	[62] = "100GBASE_SR2",
	[63] = "100GBASE_DR",
};

static const char * const ice_link_mode_str_high[] = {
	[0] = "100GBASE_KR2_PAM4",
	[1] = "100G_CAUI2_AOC_ACC",
	[2] = "100G_CAUI2",
	[3] = "100G_AUI2_AOC_ACC",
	[4] = "100G_AUI2",
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);

	for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
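/* Illustrative example (not part of the driver): per the tables above, with
 * low = BIT_ULL(10) | BIT_ULL(12), high = 0 and prefix = "caps",
 * ice_dump_phy_type() would emit under ICE_DBG_PHY:
 *
 *	caps: phy_type_low: 0x0000000000001400
 *	caps: bit(10): 5GBASE_T
 *	caps: bit(12): 10GBASE_T
 *	caps: phy_type_high: 0x0000000000000000
 */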
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static int ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return -ENODEV;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	case ICE_DEV_ID_E830_BACKPLANE:
	case ICE_DEV_ID_E830_QSFP56:
	case ICE_DEV_ID_E830_SFP:
	case ICE_DEV_ID_E830_SFP_DD:
		hw->mac_type = ICE_MAC_E830;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810 - check if the device is E810 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t - check if the device is E810T based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T6:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T5:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823 - check if the device is E823 based
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
int ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The returned addresses are also stored in the
 * HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
static int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	int status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return -EIO;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	const char *prefix;
	struct ice_hw *hw;
	int status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return -EINVAL;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
prefix = "phy_caps_no_media"; 365 break; 366 case ICE_AQC_REPORT_ACTIVE_CFG: 367 prefix = "phy_caps_active"; 368 break; 369 case ICE_AQC_REPORT_DFLT_CFG: 370 prefix = "phy_caps_default"; 371 break; 372 default: 373 prefix = "phy_caps_invalid"; 374 } 375 376 ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low), 377 le64_to_cpu(pcaps->phy_type_high), prefix); 378 379 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n", 380 prefix, report_mode); 381 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps); 382 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix, 383 pcaps->low_power_ctrl_an); 384 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix, 385 pcaps->eee_cap); 386 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix, 387 pcaps->eeer_value); 388 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix, 389 pcaps->link_fec_options); 390 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n", 391 prefix, pcaps->module_compliance_enforcement); 392 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n", 393 prefix, pcaps->extended_compliance_code); 394 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix, 395 pcaps->module_type[0]); 396 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix, 397 pcaps->module_type[1]); 398 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix, 399 pcaps->module_type[2]); 400 401 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { 402 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 403 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 404 memcpy(pi->phy.link_info.module_type, &pcaps->module_type, 405 sizeof(pi->phy.link_info.module_type)); 406 } 407 408 return status; 409 } 410 411 /** 412 * ice_aq_get_link_topo_handle - get link topology node return status 413 * @pi: port information structure 414 * @node_type: requested node type 415 * @cd: pointer to command details structure or NULL 416 * 417 * Get link topology node return status for specified node type (0x06E0) 418 * 419 * Node type cage can be used to determine if cage is present. If AQC 420 * returns error (ENOENT), then no cage present. If no cage present, then 421 * connection type is backplane or BASE-T. 422 */ 423 static int 424 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, 425 struct ice_sq_cd *cd) 426 { 427 struct ice_aqc_get_link_topo *cmd; 428 struct ice_aq_desc desc; 429 430 cmd = &desc.params.get_link_topo; 431 432 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 433 434 cmd->addr.topo_params.node_type_ctx = 435 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << 436 ICE_AQC_LINK_TOPO_NODE_CTX_S); 437 438 /* set node type */ 439 cmd->addr.topo_params.node_type_ctx |= 440 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); 441 442 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 443 } 444 445 /** 446 * ice_aq_get_netlist_node 447 * @hw: pointer to the hw struct 448 * @cmd: get_link_topo AQ structure 449 * @node_part_number: output node part number if node found 450 * @node_handle: output node handle parameter if node found 451 * 452 * Get netlist node handle. 
/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0).
 *
 * The cage node type can be used to determine whether a cage is present. If
 * the AQC returns an error (ENOENT), no cage is present; the connection type
 * is then backplane or BASE-T.
 */
static int
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_netlist_node - get a netlist node handle
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 *
 * Get netlist node handle.
 */
int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return -EINTR;

	if (node_handle)
		*node_handle =
			le16_to_cpu(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return 0;
}

/**
 * ice_find_netlist_node - find a node handle in the netlist
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Scan the netlist for a node handle of the given node type and part number.
 *
 * If node_handle is non-NULL it will be modified on function exit. It is only
 * valid if the function returns zero, and should be ignored on any non-zero
 * return value.
 *
 * Returns: 0 if the node is found, -ENOENT if no handle was found, and
 * a negative error code on failure to access the AQ.
 */
static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
				 u8 node_part_number, u16 *node_handle)
{
	u8 idx;

	for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
		struct ice_aqc_get_link_topo cmd = {};
		u8 rec_node_part_number;
		int status;

		cmd.addr.topo_params.node_type_ctx =
			FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
				   node_type_ctx);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number)
			return 0;
	}

	return -ENOENT;
}
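/* Usage sketch (illustrative only): scanning the netlist for a PHY node.
 * The part number constant is a placeholder for whatever device is being
 * probed; real constants live in the ice_aqc_get_link_topo definitions.
 *
 *	u16 handle;
 *
 *	if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
 *				   SOME_NODE_PART_NUMBER, &handle)) {
 *		// node exists; per the kernel-doc above, handle is only
 *		// valid on a zero return
 *	}
 */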
/**
 * ice_is_media_cage_present - check if media cage is present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present
	 * then connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be, since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it.
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
/**
 * ice_get_link_status_datalen - get Get Link Status data length
 * @hw: pointer to the HW struct
 *
 * Returns the data length for the Get Link Status AQ command, which is
 * larger for newer adapter families handled by the ice driver.
 */
static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
	switch (hw->mac_type) {
	case ICE_MAC_E830:
		return ICE_AQC_LS_DATA_SIZE_V2;
	case ICE_MAC_E810:
	default:
		return ICE_AQC_LS_DATA_SIZE_V1;
	}
}

/**
 * ice_aq_get_link_info - get the link status
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	struct ice_hw *hw;
	u16 cmd_flags;
	int status;

	if (!pi)
		return -EINVAL;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
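/* Usage sketch (illustrative only): a one-shot link query without enabling
 * link status events, matching the call made from ice_init_hw() below.
 * Passing a non-NULL third argument additionally copies the refreshed state
 * into a caller-provided struct ice_link_status.
 *
 *	int err = ice_aq_get_link_info(pi, false, NULL, NULL);
 *
 *	if (!err) {
 *		// pi->phy.link_info now holds the current link state
 *	}
 */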
/**
 * ice_fill_tx_timer_and_fc_thresh - fill Tx timer and FC refresh threshold
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor.
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u32 val, fc_thres_m;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define E800_IDX_OF_LFC		E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
#define E800_REFRESH_TMR	E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR

	if (hw->mac_type == ICE_MAC_E830) {
		/* Retrieve the transmit timer */
		val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
		cmd->tx_tmr_value =
			le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);

		/* Retrieve the fc threshold */
		val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
		fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
	} else {
		/* Retrieve the transmit timer */
		val = rd32(hw,
			   E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
		cmd->tx_tmr_value =
			le16_encode_bits(val,
					 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);

		/* Retrieve the fc threshold */
		val = rd32(hw,
			   E800_REFRESH_TMR(E800_IDX_OF_LFC));
		fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
	}
	cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}

/**
 * ice_aq_set_mac_cfg - set MAC configuration
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
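/* Usage sketch (illustrative only): enabling jumbo frame support at the MAC,
 * exactly as ice_init_hw() below does; ICE_AQ_SET_MAC_FRAME_SIZE_MAX is the
 * driver's maximum supported frame size.
 *
 *	err = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */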
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	int status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return -ENOMEM;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		list_for_each_entry_safe(rg_entry, tmprg_entry,
					 &recps[i].rg_list, l_entry) {
			list_del(&rg_entry->l_entry);
			devm_kfree(ice_hw_to_dev(hw), rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			mutex_destroy(&recps[i].filt_rule_lock);
			list_for_each_entry_safe(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 list_entry) {
				list_del(&lst_itr->list_entry);
				devm_kfree(ice_hw_to_dev(hw), lst_itr);
			}
		}
		devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}
/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static int ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	__le16 *config;
	int status;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = kzalloc(size, GFP_KERNEL);
	if (!config)
		return -ENOMEM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	kfree(config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can
 * be enabled/disabled for individual PFs. However, FW logging via the UART
 * can only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;
	int status = 0;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return -ENOMEM;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

/**
 * ice_output_fw_log - output FW log message via driver logs
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}
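/* Configuration sketch (illustrative only), following the contract described
 * above ice_cfg_fw_log(): module_id and the event flags are placeholders that
 * must come from the ice_aqc_fw_logging definitions. Per ice_init_hw() below,
 * a failure here is treated as non-fatal.
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[module_id].cfg = desired_event_flags;
 *	if (ice_cfg_fw_log(hw, true)) {
 *		// not fatal; initialization continues without FW logging
 *	}
 */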
/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
int ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	u16 mac_buf_len;
	void *mac_buf;
	int status;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
					     sizeof(*hw->port_info),
					     GFP_KERNEL);
	if (!hw->port_info) {
		status = -ENOMEM;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* init xarray for identifying scheduling nodes uniquely */
	xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC);

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = -ENOMEM;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);
	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = -EIO;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = -ENOMEM;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
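/* Pairing sketch (illustrative only): ice_deinit_hw() is the nominal-path
 * counterpart of ice_init_hw(); a failed ice_init_hw() unrolls itself, so
 * callers must not call ice_deinit_hw() on the failure path.
 *
 *	err = ice_init_hw(hw);
 *	if (err)
 *		return err;	// nothing further to unroll here
 *	...
 *	ice_deinit_hw(hw);	// on teardown only
 */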
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
int ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return -EIO;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return -EIO;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset.
 */
static int ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return -EIO;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return -EIO;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return -EINVAL;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}
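/* Usage sketch (illustrative only): ice_init_hw() above issues a PF reset
 * via ice_reset(hw, ICE_RESET_PFR) before touching most of the device. Per
 * the kernel-doc above, a CORER or GLOBR request additionally restores PXE
 * mode, which must be cleared again with ice_clear_pxe_mode() once the AQ
 * interface is back up in the rebuild flow.
 *
 *	err = ice_reset(hw, ICE_RESET_PFR);
 */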
/**
 * ice_copy_rxq_ctx_to_hw - copy rxq context to HW registers
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space.
 */
static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return -EINVAL;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return -EINVAL;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx - write rxq context to hardware
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space, and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand.
 */
int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return -EINVAL;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
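/* Usage sketch (illustrative only): the field values below are placeholders;
 * real callers fill struct ice_rlan_ctx from the ring configuration before
 * writing it out, and the 128-byte base-address granularity shown is an
 * assumption for this example.
 *
 *	struct ice_rlan_ctx rlan_ctx = {};
 *
 *	rlan_ctx.base = ring_dma >> 7;	// assumed 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	if (ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index)) {
 *		// NULL context or invalid queue index
 *	}
 */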
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, ice_get_sbq(hw),
			       (struct ice_aq_desc *)desc, buf, buf_size, cd);
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}
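/* Usage sketch (illustrative only): reading a register over the sideband
 * queue. Per ice_sbq_rw_reg() above, a zero opcode is treated as a read (the
 * data comes back in the completion), while a non-zero opcode carries
 * in->data as a write. The dest_dev value is a placeholder; real values come
 * from the ice_sbq_msg_input definitions.
 *
 *	struct ice_sbq_msg_input msg = {
 *		.dest_dev = some_dest_dev,	// assumed destination
 *		.opcode = 0,			// read
 *		.msg_addr_low = lower_16_bits(addr),
 *		.msg_addr_high = upper_16_bits(addr),
 *	};
 *
 *	if (!ice_sbq_rw_reg(hw, &msg))
 *		val = msg.data;
 */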
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd - check if an AQ command should be retried
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	bool is_cmd_for_retry;
	u8 idx = 0;
	u16 opcode;
	int status;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		/* All retryable cmds are direct, without buf. */
		WARN_ON(buf);

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		msleep(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	return status;
}
See also ice_acquire_global_cfg_lock(). 1790 */ 1791 switch (le16_to_cpu(desc->opcode)) { 1792 case ice_aqc_opc_download_pkg: 1793 case ice_aqc_opc_get_pkg_info_list: 1794 case ice_aqc_opc_get_ver: 1795 case ice_aqc_opc_upload_section: 1796 case ice_aqc_opc_update_pkg: 1797 case ice_aqc_opc_set_port_params: 1798 case ice_aqc_opc_get_vlan_mode_parameters: 1799 case ice_aqc_opc_set_vlan_mode_parameters: 1800 case ice_aqc_opc_add_recipe: 1801 case ice_aqc_opc_recipe_to_profile: 1802 case ice_aqc_opc_get_recipe: 1803 case ice_aqc_opc_get_recipe_to_profile: 1804 break; 1805 case ice_aqc_opc_release_res: 1806 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) 1807 break; 1808 fallthrough; 1809 default: 1810 mutex_lock(&ice_global_cfg_lock_sw); 1811 lock_acquired = true; 1812 break; 1813 } 1814 1815 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); 1816 if (lock_acquired) 1817 mutex_unlock(&ice_global_cfg_lock_sw); 1818 1819 return status; 1820 } 1821 1822 /** 1823 * ice_aq_get_fw_ver 1824 * @hw: pointer to the HW struct 1825 * @cd: pointer to command details structure or NULL 1826 * 1827 * Get the firmware version (0x0001) from the admin queue commands 1828 */ 1829 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) 1830 { 1831 struct ice_aqc_get_ver *resp; 1832 struct ice_aq_desc desc; 1833 int status; 1834 1835 resp = &desc.params.get_ver; 1836 1837 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); 1838 1839 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1840 1841 if (!status) { 1842 hw->fw_branch = resp->fw_branch; 1843 hw->fw_maj_ver = resp->fw_major; 1844 hw->fw_min_ver = resp->fw_minor; 1845 hw->fw_patch = resp->fw_patch; 1846 hw->fw_build = le32_to_cpu(resp->fw_build); 1847 hw->api_branch = resp->api_branch; 1848 hw->api_maj_ver = resp->api_major; 1849 hw->api_min_ver = resp->api_minor; 1850 hw->api_patch = resp->api_patch; 1851 } 1852 1853 return status; 1854 } 1855 1856 /** 1857 * ice_aq_send_driver_ver 1858 * @hw: pointer to the HW struct 1859 * @dv: driver's major, minor version 1860 * @cd: pointer to command details structure or NULL 1861 * 1862 * Send the driver version (0x0002) to the firmware 1863 */ 1864 int 1865 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, 1866 struct ice_sq_cd *cd) 1867 { 1868 struct ice_aqc_driver_ver *cmd; 1869 struct ice_aq_desc desc; 1870 u16 len; 1871 1872 cmd = &desc.params.driver_ver; 1873 1874 if (!dv) 1875 return -EINVAL; 1876 1877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); 1878 1879 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1880 cmd->major_ver = dv->major_ver; 1881 cmd->minor_ver = dv->minor_ver; 1882 cmd->build_ver = dv->build_ver; 1883 cmd->subbuild_ver = dv->subbuild_ver; 1884 1885 len = 0; 1886 while (len < sizeof(dv->driver_string) && 1887 isascii(dv->driver_string[len]) && dv->driver_string[len]) 1888 len++; 1889 1890 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); 1891 } 1892 1893 /** 1894 * ice_aq_q_shutdown 1895 * @hw: pointer to the HW struct 1896 * @unloading: is the driver unloading itself 1897 * 1898 * Tell the Firmware that we're shutting down the AdminQ and whether 1899 * or not the driver is unloading as well (0x0003). 
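*
* Illustrative usage (editor's sketch, not a quote from the driver): an
* orderly teardown path would quiesce other AdminQ traffic and then issue
*
*	ice_aq_q_shutdown(hw, true);
*
* passing @unloading as false only when the driver is expected to stay
* loaded, e.g. across a reset.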
1900 */ 1901 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1902 { 1903 struct ice_aqc_q_shutdown *cmd; 1904 struct ice_aq_desc desc; 1905 1906 cmd = &desc.params.q_shutdown; 1907 1908 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1909 1910 if (unloading) 1911 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1912 1913 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1914 } 1915 1916 /** 1917 * ice_aq_req_res 1918 * @hw: pointer to the HW struct 1919 * @res: resource ID 1920 * @access: access type 1921 * @sdp_number: resource number 1922 * @timeout: the maximum time in ms that the driver may hold the resource 1923 * @cd: pointer to command details structure or NULL 1924 * 1925 * Requests common resource using the admin queue commands (0x0008). 1926 * When attempting to acquire the Global Config Lock, the driver can 1927 * learn of three states: 1928 * 1) 0 - acquired lock, and can perform download package 1929 * 2) -EIO - did not get lock, driver should fail to load 1930 * 3) -EALREADY - did not get lock, but another driver has 1931 * successfully downloaded the package; the driver does 1932 * not have to download the package and can continue 1933 * loading 1934 * 1935 * Note that if the caller is in an acquire lock, perform action, release lock 1936 * phase of operation, it is possible that the FW may detect a timeout and issue 1937 * a CORER. In this case, the driver will receive a CORER interrupt and will 1938 * have to determine its cause. The calling thread that is handling this flow 1939 * will likely get an error propagated back to it indicating the Download 1940 * Package, Update Package or the Release Resource AQ commands timed out. 1941 */ 1942 static int 1943 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1944 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1945 struct ice_sq_cd *cd) 1946 { 1947 struct ice_aqc_req_res *cmd_resp; 1948 struct ice_aq_desc desc; 1949 int status; 1950 1951 cmd_resp = &desc.params.res_owner; 1952 1953 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1954 1955 cmd_resp->res_id = cpu_to_le16(res); 1956 cmd_resp->access_type = cpu_to_le16(access); 1957 cmd_resp->res_number = cpu_to_le32(sdp_number); 1958 cmd_resp->timeout = cpu_to_le32(*timeout); 1959 *timeout = 0; 1960 1961 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1962 1963 /* The completion specifies the maximum time in ms that the driver 1964 * may hold the resource in the Timeout field. 1965 */ 1966 1967 /* Global config lock response utilizes an additional status field. 1968 * 1969 * If the Global config lock resource is held by some other driver, the 1970 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1971 * and the timeout field indicates the maximum time the current owner 1972 * of the resource has to free it. 
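*
* Editor's illustration of how a caller typically consumes these three
* outcomes (ice_acquire_global_cfg_lock(), defined elsewhere in the
* driver, wraps this command):
*
*	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
*	if (!status)
*		... lock held: download the package, then release it ...
*	else if (status == -EALREADY)
*		... another PF already downloaded it; continue loading ...
*	else
*		... did not get the lock; fail the driver load ...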
1973 */
1974 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1975 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1976 *timeout = le32_to_cpu(cmd_resp->timeout);
1977 return 0;
1978 } else if (le16_to_cpu(cmd_resp->status) ==
1979 ICE_AQ_RES_GLBL_IN_PROG) {
1980 *timeout = le32_to_cpu(cmd_resp->timeout);
1981 return -EIO;
1982 } else if (le16_to_cpu(cmd_resp->status) ==
1983 ICE_AQ_RES_GLBL_DONE) {
1984 return -EALREADY;
1985 }
1986
1987 /* invalid FW response, force a timeout immediately */
1988 *timeout = 0;
1989 return -EIO;
1990 }
1991
1992 /* If the resource is held by some other driver, the command completes
1993 * with a busy return value and the timeout field indicates the maximum
1994 * time the current owner of the resource has to free it.
1995 */
1996 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1997 *timeout = le32_to_cpu(cmd_resp->timeout);
1998
1999 return status;
2000 }
2001
2002 /**
2003 * ice_aq_release_res
2004 * @hw: pointer to the HW struct
2005 * @res: resource ID
2006 * @sdp_number: resource number
2007 * @cd: pointer to command details structure or NULL
2008 *
2009 * Release a common resource using the admin queue commands (0x0009).
2010 */
2011 static int
2012 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
2013 struct ice_sq_cd *cd)
2014 {
2015 struct ice_aqc_req_res *cmd;
2016 struct ice_aq_desc desc;
2017
2018 cmd = &desc.params.res_owner;
2019
2020 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
2021
2022 cmd->res_id = cpu_to_le16(res);
2023 cmd->res_number = cpu_to_le32(sdp_number);
2024
2025 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2026 }
2027
2028 /**
2029 * ice_acquire_res
2030 * @hw: pointer to the HW structure
2031 * @res: resource ID
2032 * @access: access type (read or write)
2033 * @timeout: timeout in milliseconds
2034 *
2035 * This function will attempt to acquire the ownership of a resource.
2036 */
2037 int
2038 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2039 enum ice_aq_res_access_type access, u32 timeout)
2040 {
2041 #define ICE_RES_POLLING_DELAY_MS 10
2042 u32 delay = ICE_RES_POLLING_DELAY_MS;
2043 u32 time_left = timeout;
2044 int status;
2045
2046 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2047
2048 /* A return code of -EALREADY means that another driver has
2049 * previously acquired the resource and performed any necessary updates;
2050 * in this case the caller does not obtain the resource and has no
2051 * further work to do.
2052 */
2053 if (status == -EALREADY)
2054 goto ice_acquire_res_exit;
2055
2056 if (status)
2057 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2058
2059 /* If necessary, poll until the current lock owner times out */
2060 timeout = time_left;
2061 while (status && timeout && time_left) {
2062 mdelay(delay);
2063 timeout = (timeout > delay) ?
timeout - delay : 0; 2064 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2065 2066 if (status == -EALREADY) 2067 /* lock free, but no work to do */ 2068 break; 2069 2070 if (!status) 2071 /* lock acquired */ 2072 break; 2073 } 2074 if (status && status != -EALREADY) 2075 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 2076 2077 ice_acquire_res_exit: 2078 if (status == -EALREADY) { 2079 if (access == ICE_RES_WRITE) 2080 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 2081 else 2082 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 2083 } 2084 return status; 2085 } 2086 2087 /** 2088 * ice_release_res 2089 * @hw: pointer to the HW structure 2090 * @res: resource ID 2091 * 2092 * This function will release a resource using the proper Admin Command. 2093 */ 2094 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2095 { 2096 unsigned long timeout; 2097 int status; 2098 2099 /* there are some rare cases when trying to release the resource 2100 * results in an admin queue timeout, so handle them correctly 2101 */ 2102 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 2103 do { 2104 status = ice_aq_release_res(hw, res, 0, NULL); 2105 if (status != -EIO) 2106 break; 2107 usleep_range(1000, 2000); 2108 } while (time_before(jiffies, timeout)); 2109 } 2110 2111 /** 2112 * ice_aq_alloc_free_res - command to allocate/free resources 2113 * @hw: pointer to the HW struct 2114 * @buf: Indirect buffer to hold data parameters and response 2115 * @buf_size: size of buffer for indirect commands 2116 * @opc: pass in the command opcode 2117 * 2118 * Helper function to allocate/free resources using the admin queue commands 2119 */ 2120 int ice_aq_alloc_free_res(struct ice_hw *hw, 2121 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2122 enum ice_adminq_opc opc) 2123 { 2124 struct ice_aqc_alloc_free_res_cmd *cmd; 2125 struct ice_aq_desc desc; 2126 2127 cmd = &desc.params.sw_res_ctrl; 2128 2129 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 2130 return -EINVAL; 2131 2132 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2133 2134 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2135 2136 cmd->num_entries = cpu_to_le16(1); 2137 2138 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 2139 } 2140 2141 /** 2142 * ice_alloc_hw_res - allocate resource 2143 * @hw: pointer to the HW struct 2144 * @type: type of resource 2145 * @num: number of resources to allocate 2146 * @btm: allocate from bottom 2147 * @res: pointer to array that will receive the resources 2148 */ 2149 int 2150 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2151 { 2152 struct ice_aqc_alloc_free_res_elem *buf; 2153 u16 buf_len; 2154 int status; 2155 2156 buf_len = struct_size(buf, elem, num); 2157 buf = kzalloc(buf_len, GFP_KERNEL); 2158 if (!buf) 2159 return -ENOMEM; 2160 2161 /* Prepare buffer to allocate resource. 
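*
* On success, firmware echoes this buffer back with the allocated
* resource IDs filled into buf->elem[], which is then copied out to the
* caller's res array below.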
*/ 2162 buf->num_elems = cpu_to_le16(num); 2163 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2164 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2165 if (btm) 2166 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2167 2168 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2169 if (status) 2170 goto ice_alloc_res_exit; 2171 2172 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2173 2174 ice_alloc_res_exit: 2175 kfree(buf); 2176 return status; 2177 } 2178 2179 /** 2180 * ice_free_hw_res - free allocated HW resource 2181 * @hw: pointer to the HW struct 2182 * @type: type of resource to free 2183 * @num: number of resources 2184 * @res: pointer to array that contains the resources to free 2185 */ 2186 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2187 { 2188 struct ice_aqc_alloc_free_res_elem *buf; 2189 u16 buf_len; 2190 int status; 2191 2192 buf_len = struct_size(buf, elem, num); 2193 buf = kzalloc(buf_len, GFP_KERNEL); 2194 if (!buf) 2195 return -ENOMEM; 2196 2197 /* Prepare buffer to free resource. */ 2198 buf->num_elems = cpu_to_le16(num); 2199 buf->res_type = cpu_to_le16(type); 2200 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2201 2202 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2203 if (status) 2204 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2205 2206 kfree(buf); 2207 return status; 2208 } 2209 2210 /** 2211 * ice_get_num_per_func - determine number of resources per PF 2212 * @hw: pointer to the HW structure 2213 * @max: value to be evenly split between each PF 2214 * 2215 * Determine the number of valid functions by going through the bitmap returned 2216 * from parsing capabilities and use this to calculate the number of resources 2217 * per PF based on the max value passed in. 2218 */ 2219 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2220 { 2221 u8 funcs; 2222 2223 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2224 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2225 ICE_CAPS_VALID_FUNCS_M); 2226 2227 if (!funcs) 2228 return 0; 2229 2230 return max / funcs; 2231 } 2232 2233 /** 2234 * ice_parse_common_caps - parse common device/function capabilities 2235 * @hw: pointer to the HW struct 2236 * @caps: pointer to common capabilities structure 2237 * @elem: the capability element to parse 2238 * @prefix: message prefix for tracing capabilities 2239 * 2240 * Given a capability element, extract relevant details into the common 2241 * capability structure. 2242 * 2243 * Returns: true if the capability matches one of the common capability ids, 2244 * false otherwise. 
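*
* For instance (editor's illustration), an element carrying cap ==
* ICE_AQC_CAPS_MSIX with number == 64 and phys_id == 0 is decoded below
* into caps->num_msix_vectors = 64 and caps->msix_vector_first_id = 0.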
2245 */ 2246 static bool 2247 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2248 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2249 { 2250 u32 logical_id = le32_to_cpu(elem->logical_id); 2251 u32 phys_id = le32_to_cpu(elem->phys_id); 2252 u32 number = le32_to_cpu(elem->number); 2253 u16 cap = le16_to_cpu(elem->cap); 2254 bool found = true; 2255 2256 switch (cap) { 2257 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2258 caps->valid_functions = number; 2259 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2260 caps->valid_functions); 2261 break; 2262 case ICE_AQC_CAPS_SRIOV: 2263 caps->sr_iov_1_1 = (number == 1); 2264 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2265 caps->sr_iov_1_1); 2266 break; 2267 case ICE_AQC_CAPS_DCB: 2268 caps->dcb = (number == 1); 2269 caps->active_tc_bitmap = logical_id; 2270 caps->maxtc = phys_id; 2271 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2272 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2273 caps->active_tc_bitmap); 2274 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2275 break; 2276 case ICE_AQC_CAPS_RSS: 2277 caps->rss_table_size = number; 2278 caps->rss_table_entry_width = logical_id; 2279 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2280 caps->rss_table_size); 2281 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2282 caps->rss_table_entry_width); 2283 break; 2284 case ICE_AQC_CAPS_RXQS: 2285 caps->num_rxq = number; 2286 caps->rxq_first_id = phys_id; 2287 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2288 caps->num_rxq); 2289 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2290 caps->rxq_first_id); 2291 break; 2292 case ICE_AQC_CAPS_TXQS: 2293 caps->num_txq = number; 2294 caps->txq_first_id = phys_id; 2295 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2296 caps->num_txq); 2297 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2298 caps->txq_first_id); 2299 break; 2300 case ICE_AQC_CAPS_MSIX: 2301 caps->num_msix_vectors = number; 2302 caps->msix_vector_first_id = phys_id; 2303 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2304 caps->num_msix_vectors); 2305 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2306 caps->msix_vector_first_id); 2307 break; 2308 case ICE_AQC_CAPS_PENDING_NVM_VER: 2309 caps->nvm_update_pending_nvm = true; 2310 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2311 break; 2312 case ICE_AQC_CAPS_PENDING_OROM_VER: 2313 caps->nvm_update_pending_orom = true; 2314 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2315 break; 2316 case ICE_AQC_CAPS_PENDING_NET_VER: 2317 caps->nvm_update_pending_netlist = true; 2318 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2319 break; 2320 case ICE_AQC_CAPS_NVM_MGMT: 2321 caps->nvm_unified_update = 2322 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
2323 true : false; 2324 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2325 caps->nvm_unified_update); 2326 break; 2327 case ICE_AQC_CAPS_RDMA: 2328 caps->rdma = (number == 1); 2329 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2330 break; 2331 case ICE_AQC_CAPS_MAX_MTU: 2332 caps->max_mtu = number; 2333 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2334 prefix, caps->max_mtu); 2335 break; 2336 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2337 caps->pcie_reset_avoidance = (number > 0); 2338 ice_debug(hw, ICE_DBG_INIT, 2339 "%s: pcie_reset_avoidance = %d\n", prefix, 2340 caps->pcie_reset_avoidance); 2341 break; 2342 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2343 caps->reset_restrict_support = (number == 1); 2344 ice_debug(hw, ICE_DBG_INIT, 2345 "%s: reset_restrict_support = %d\n", prefix, 2346 caps->reset_restrict_support); 2347 break; 2348 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2349 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2350 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2351 prefix, caps->roce_lag); 2352 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2353 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2354 prefix, caps->sriov_lag); 2355 break; 2356 default: 2357 /* Not one of the recognized common capabilities */ 2358 found = false; 2359 } 2360 2361 return found; 2362 } 2363 2364 /** 2365 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2366 * @hw: pointer to the HW structure 2367 * @caps: pointer to capabilities structure to fix 2368 * 2369 * Re-calculate the capabilities that are dependent on the number of physical 2370 * ports; i.e. some features are not supported or function differently on 2371 * devices with more than 4 ports. 2372 */ 2373 static void 2374 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2375 { 2376 /* This assumes device capabilities are always scanned before function 2377 * capabilities during the initialization flow. 2378 */ 2379 if (hw->dev_caps.num_funcs > 4) { 2380 /* Max 4 TCs per port */ 2381 caps->maxtc = 4; 2382 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2383 caps->maxtc); 2384 if (caps->rdma) { 2385 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2386 caps->rdma = 0; 2387 } 2388 2389 /* print message only when processing device capabilities 2390 * during initialization. 2391 */ 2392 if (caps == &hw->dev_caps.common_cap) 2393 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2394 } 2395 } 2396 2397 /** 2398 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2399 * @hw: pointer to the HW struct 2400 * @func_p: pointer to function capabilities structure 2401 * @cap: pointer to the capability element to parse 2402 * 2403 * Extract function capabilities for ICE_AQC_CAPS_VF. 
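*
* For example (editor's illustration), an element reporting number == 16
* and logical_id == 0 is decoded as num_allocd_vfs = 16 with
* vf_base_id = 0.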
2404 */ 2405 static void 2406 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2407 struct ice_aqc_list_caps_elem *cap) 2408 { 2409 u32 logical_id = le32_to_cpu(cap->logical_id); 2410 u32 number = le32_to_cpu(cap->number); 2411 2412 func_p->num_allocd_vfs = number; 2413 func_p->vf_base_id = logical_id; 2414 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2415 func_p->num_allocd_vfs); 2416 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2417 func_p->vf_base_id); 2418 } 2419 2420 /** 2421 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2422 * @hw: pointer to the HW struct 2423 * @func_p: pointer to function capabilities structure 2424 * @cap: pointer to the capability element to parse 2425 * 2426 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2427 */ 2428 static void 2429 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2430 struct ice_aqc_list_caps_elem *cap) 2431 { 2432 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2433 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2434 le32_to_cpu(cap->number)); 2435 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2436 func_p->guar_num_vsi); 2437 } 2438 2439 /** 2440 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2441 * @hw: pointer to the HW struct 2442 * @func_p: pointer to function capabilities structure 2443 * @cap: pointer to the capability element to parse 2444 * 2445 * Extract function capabilities for ICE_AQC_CAPS_1588. 2446 */ 2447 static void 2448 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2449 struct ice_aqc_list_caps_elem *cap) 2450 { 2451 struct ice_ts_func_info *info = &func_p->ts_func_info; 2452 u32 number = le32_to_cpu(cap->number); 2453 2454 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2455 func_p->common_cap.ieee_1588 = info->ena; 2456 2457 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2458 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2459 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2460 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2461 2462 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2463 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2464 2465 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2466 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2467 } else { 2468 /* Unknown clock frequency, so assume a (probably incorrect) 2469 * default to avoid out-of-bounds look ups of frequency 2470 * related information. 
2471 */
2472 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
2473 info->clk_freq);
2474 info->time_ref = ICE_TIME_REF_FREQ_25_000;
2475 }
2476
2477 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
2478 func_p->common_cap.ieee_1588);
2479 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
2480 info->src_tmr_owned);
2481 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
2482 info->tmr_ena);
2483 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
2484 info->tmr_index_owned);
2485 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
2486 info->tmr_index_assoc);
2487 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
2488 info->clk_freq);
2489 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
2490 info->clk_src);
2491 }
2492
2493 /**
2494 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
2495 * @hw: pointer to the HW struct
2496 * @func_p: pointer to function capabilities structure
2497 *
2498 * Extract function capabilities for ICE_AQC_CAPS_FD.
2499 */
2500 static void
2501 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
2502 {
2503 u32 reg_val, gsize, bsize;
2504
2505 reg_val = rd32(hw, GLQF_FD_SIZE);
2506 switch (hw->mac_type) {
2507 case ICE_MAC_E830:
2508 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2509 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2510 break;
2511 case ICE_MAC_E810:
2512 default:
2513 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
2514 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
2515 }
2516 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
2517 func_p->fd_fltr_best_effort = bsize;
2518
2519 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
2520 func_p->fd_fltr_guar);
2521 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
2522 func_p->fd_fltr_best_effort);
2523 }
2524
2525 /**
2526 * ice_parse_func_caps - Parse function capabilities
2527 * @hw: pointer to the HW struct
2528 * @func_p: pointer to function capabilities structure
2529 * @buf: buffer containing the function capability records
2530 * @cap_count: the number of capabilities
2531 *
2532 * Helper function to parse function (0x000A) capabilities list. For
2533 * capabilities shared between device and function, this relies on
2534 * ice_parse_common_caps.
2535 *
2536 * Loop through the list of provided capabilities and extract the relevant
2537 * data into the function capabilities structure.
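*
* A minimal sketch of the surrounding call sequence (editor's
* illustration; ice_discover_func_caps() later in this file is the real
* flow):
*
*	buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
*	ice_aq_list_caps(hw, buf, ICE_AQ_MAX_BUF_LEN, &cap_count,
*			 ice_aqc_opc_list_func_caps, NULL);
*	ice_parse_func_caps(hw, &hw->func_caps, buf, cap_count);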
2538 */ 2539 static void 2540 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2541 void *buf, u32 cap_count) 2542 { 2543 struct ice_aqc_list_caps_elem *cap_resp; 2544 u32 i; 2545 2546 cap_resp = buf; 2547 2548 memset(func_p, 0, sizeof(*func_p)); 2549 2550 for (i = 0; i < cap_count; i++) { 2551 u16 cap = le16_to_cpu(cap_resp[i].cap); 2552 bool found; 2553 2554 found = ice_parse_common_caps(hw, &func_p->common_cap, 2555 &cap_resp[i], "func caps"); 2556 2557 switch (cap) { 2558 case ICE_AQC_CAPS_VF: 2559 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2560 break; 2561 case ICE_AQC_CAPS_VSI: 2562 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2563 break; 2564 case ICE_AQC_CAPS_1588: 2565 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2566 break; 2567 case ICE_AQC_CAPS_FD: 2568 ice_parse_fdir_func_caps(hw, func_p); 2569 break; 2570 default: 2571 /* Don't list common capabilities as unknown */ 2572 if (!found) 2573 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2574 i, cap); 2575 break; 2576 } 2577 } 2578 2579 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2580 } 2581 2582 /** 2583 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2584 * @hw: pointer to the HW struct 2585 * @dev_p: pointer to device capabilities structure 2586 * @cap: capability element to parse 2587 * 2588 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2589 */ 2590 static void 2591 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2592 struct ice_aqc_list_caps_elem *cap) 2593 { 2594 u32 number = le32_to_cpu(cap->number); 2595 2596 dev_p->num_funcs = hweight32(number); 2597 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2598 dev_p->num_funcs); 2599 } 2600 2601 /** 2602 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2603 * @hw: pointer to the HW struct 2604 * @dev_p: pointer to device capabilities structure 2605 * @cap: capability element to parse 2606 * 2607 * Parse ICE_AQC_CAPS_VF for device capabilities. 2608 */ 2609 static void 2610 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2611 struct ice_aqc_list_caps_elem *cap) 2612 { 2613 u32 number = le32_to_cpu(cap->number); 2614 2615 dev_p->num_vfs_exposed = number; 2616 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2617 dev_p->num_vfs_exposed); 2618 } 2619 2620 /** 2621 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2622 * @hw: pointer to the HW struct 2623 * @dev_p: pointer to device capabilities structure 2624 * @cap: capability element to parse 2625 * 2626 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2627 */ 2628 static void 2629 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2630 struct ice_aqc_list_caps_elem *cap) 2631 { 2632 u32 number = le32_to_cpu(cap->number); 2633 2634 dev_p->num_vsi_allocd_to_host = number; 2635 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2636 dev_p->num_vsi_allocd_to_host); 2637 } 2638 2639 /** 2640 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2641 * @hw: pointer to the HW struct 2642 * @dev_p: pointer to device capabilities structure 2643 * @cap: capability element to parse 2644 * 2645 * Parse ICE_AQC_CAPS_1588 for device capabilities. 
2646 */
2647 static void
2648 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2649 struct ice_aqc_list_caps_elem *cap)
2650 {
2651 struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
2652 u32 logical_id = le32_to_cpu(cap->logical_id);
2653 u32 phys_id = le32_to_cpu(cap->phys_id);
2654 u32 number = le32_to_cpu(cap->number);
2655
2656 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
2657 dev_p->common_cap.ieee_1588 = info->ena;
2658
2659 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
2660 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
2661 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
2662
2663 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
2664 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
2665 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
2666
2667 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
2668
2669 info->ena_ports = logical_id;
2670 info->tmr_own_map = phys_id;
2671
2672 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
2673 dev_p->common_cap.ieee_1588);
2674 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
2675 info->tmr0_owner);
2676 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
2677 info->tmr0_owned);
2678 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
2679 info->tmr0_ena);
2680 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
2681 info->tmr1_owner);
2682 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
2683 info->tmr1_owned);
2684 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
2685 info->tmr1_ena);
2686 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
2687 info->ts_ll_read);
2688 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
2689 info->ena_ports);
2690 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
2691 info->tmr_own_map);
2692 }
2693
2694 /**
2695 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
2696 * @hw: pointer to the HW struct
2697 * @dev_p: pointer to device capabilities structure
2698 * @cap: capability element to parse
2699 *
2700 * Parse ICE_AQC_CAPS_FD for device capabilities.
2701 */
2702 static void
2703 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2704 struct ice_aqc_list_caps_elem *cap)
2705 {
2706 u32 number = le32_to_cpu(cap->number);
2707
2708 dev_p->num_flow_director_fltr = number;
2709 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
2710 dev_p->num_flow_director_fltr);
2711 }
2712
2713 /**
2714 * ice_parse_dev_caps - Parse device capabilities
2715 * @hw: pointer to the HW struct
2716 * @dev_p: pointer to device capabilities structure
2717 * @buf: buffer containing the device capability records
2718 * @cap_count: the number of capabilities
2719 *
2720 * Helper function to parse device (0x000B) capabilities list. For
2721 * capabilities shared between device and function, this relies on
2722 * ice_parse_common_caps.
2723 *
2724 * Loop through the list of provided capabilities and extract the relevant
2725 * data into the device capabilities structure.
2726 */ 2727 static void 2728 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2729 void *buf, u32 cap_count) 2730 { 2731 struct ice_aqc_list_caps_elem *cap_resp; 2732 u32 i; 2733 2734 cap_resp = buf; 2735 2736 memset(dev_p, 0, sizeof(*dev_p)); 2737 2738 for (i = 0; i < cap_count; i++) { 2739 u16 cap = le16_to_cpu(cap_resp[i].cap); 2740 bool found; 2741 2742 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2743 &cap_resp[i], "dev caps"); 2744 2745 switch (cap) { 2746 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2747 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2748 break; 2749 case ICE_AQC_CAPS_VF: 2750 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2751 break; 2752 case ICE_AQC_CAPS_VSI: 2753 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2754 break; 2755 case ICE_AQC_CAPS_1588: 2756 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2757 break; 2758 case ICE_AQC_CAPS_FD: 2759 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2760 break; 2761 default: 2762 /* Don't list common capabilities as unknown */ 2763 if (!found) 2764 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2765 i, cap); 2766 break; 2767 } 2768 } 2769 2770 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2771 } 2772 2773 /** 2774 * ice_is_pf_c827 - check if pf contains c827 phy 2775 * @hw: pointer to the hw struct 2776 */ 2777 bool ice_is_pf_c827(struct ice_hw *hw) 2778 { 2779 struct ice_aqc_get_link_topo cmd = {}; 2780 u8 node_part_number; 2781 u16 node_handle; 2782 int status; 2783 2784 if (hw->mac_type != ICE_MAC_E810) 2785 return false; 2786 2787 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2788 return true; 2789 2790 cmd.addr.topo_params.node_type_ctx = 2791 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2792 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2793 cmd.addr.topo_params.index = 0; 2794 2795 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2796 &node_handle); 2797 2798 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2799 return false; 2800 2801 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2802 return true; 2803 2804 return false; 2805 } 2806 2807 /** 2808 * ice_is_phy_rclk_in_netlist 2809 * @hw: pointer to the hw struct 2810 * 2811 * Check if the PHY Recovered Clock device is present in the netlist 2812 */ 2813 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2814 { 2815 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2816 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2817 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2818 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2819 return false; 2820 2821 return true; 2822 } 2823 2824 /** 2825 * ice_is_clock_mux_in_netlist 2826 * @hw: pointer to the hw struct 2827 * 2828 * Check if the Clock Multiplexer device is present in the netlist 2829 */ 2830 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2831 { 2832 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2833 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2834 NULL)) 2835 return false; 2836 2837 return true; 2838 } 2839 2840 /** 2841 * ice_is_cgu_in_netlist - check for CGU presence 2842 * @hw: pointer to the hw struct 2843 * 2844 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2845 * Save the CGU part number in the hw structure for later use. 
2846 * Return: 2847 * * true - cgu is present 2848 * * false - cgu is not present 2849 */ 2850 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2851 { 2852 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2853 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2854 NULL)) { 2855 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2856 return true; 2857 } else if (!ice_find_netlist_node(hw, 2858 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2859 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2860 NULL)) { 2861 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2862 return true; 2863 } 2864 2865 return false; 2866 } 2867 2868 /** 2869 * ice_is_gps_in_netlist 2870 * @hw: pointer to the hw struct 2871 * 2872 * Check if the GPS generic device is present in the netlist 2873 */ 2874 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2875 { 2876 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2877 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2878 return false; 2879 2880 return true; 2881 } 2882 2883 /** 2884 * ice_aq_list_caps - query function/device capabilities 2885 * @hw: pointer to the HW struct 2886 * @buf: a buffer to hold the capabilities 2887 * @buf_size: size of the buffer 2888 * @cap_count: if not NULL, set to the number of capabilities reported 2889 * @opc: capabilities type to discover, device or function 2890 * @cd: pointer to command details structure or NULL 2891 * 2892 * Get the function (0x000A) or device (0x000B) capabilities description from 2893 * firmware and store it in the buffer. 2894 * 2895 * If the cap_count pointer is not NULL, then it is set to the number of 2896 * capabilities firmware will report. Note that if the buffer size is too 2897 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2898 * cap_count will still be updated in this case. It is recommended that the 2899 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2900 * firmware could return) to avoid this. 2901 */ 2902 int 2903 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2904 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2905 { 2906 struct ice_aqc_list_caps *cmd; 2907 struct ice_aq_desc desc; 2908 int status; 2909 2910 cmd = &desc.params.get_cap; 2911 2912 if (opc != ice_aqc_opc_list_func_caps && 2913 opc != ice_aqc_opc_list_dev_caps) 2914 return -EINVAL; 2915 2916 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2917 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2918 2919 if (cap_count) 2920 *cap_count = le32_to_cpu(cmd->count); 2921 2922 return status; 2923 } 2924 2925 /** 2926 * ice_discover_dev_caps - Read and extract device capabilities 2927 * @hw: pointer to the hardware structure 2928 * @dev_caps: pointer to device capabilities structure 2929 * 2930 * Read the device capabilities and extract them into the dev_caps structure 2931 * for later use. 2932 */ 2933 int 2934 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2935 { 2936 u32 cap_count = 0; 2937 void *cbuf; 2938 int status; 2939 2940 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2941 if (!cbuf) 2942 return -ENOMEM; 2943 2944 /* Although the driver doesn't know the number of capabilities the 2945 * device will return, we can simply send a 4KB buffer, the maximum 2946 * possible size that firmware can return. 
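*
* The element count below is simply how many ice_aqc_list_caps_elem
* records fit in that buffer; firmware reports the actual count back
* through ice_aq_list_caps().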
2947 */ 2948 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2949 2950 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2951 ice_aqc_opc_list_dev_caps, NULL); 2952 if (!status) 2953 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2954 kfree(cbuf); 2955 2956 return status; 2957 } 2958 2959 /** 2960 * ice_discover_func_caps - Read and extract function capabilities 2961 * @hw: pointer to the hardware structure 2962 * @func_caps: pointer to function capabilities structure 2963 * 2964 * Read the function capabilities and extract them into the func_caps structure 2965 * for later use. 2966 */ 2967 static int 2968 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2969 { 2970 u32 cap_count = 0; 2971 void *cbuf; 2972 int status; 2973 2974 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2975 if (!cbuf) 2976 return -ENOMEM; 2977 2978 /* Although the driver doesn't know the number of capabilities the 2979 * device will return, we can simply send a 4KB buffer, the maximum 2980 * possible size that firmware can return. 2981 */ 2982 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2983 2984 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2985 ice_aqc_opc_list_func_caps, NULL); 2986 if (!status) 2987 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2988 kfree(cbuf); 2989 2990 return status; 2991 } 2992 2993 /** 2994 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2995 * @hw: pointer to the hardware structure 2996 */ 2997 void ice_set_safe_mode_caps(struct ice_hw *hw) 2998 { 2999 struct ice_hw_func_caps *func_caps = &hw->func_caps; 3000 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 3001 struct ice_hw_common_caps cached_caps; 3002 u32 num_funcs; 3003 3004 /* cache some func_caps values that should be restored after memset */ 3005 cached_caps = func_caps->common_cap; 3006 3007 /* unset func capabilities */ 3008 memset(func_caps, 0, sizeof(*func_caps)); 3009 3010 #define ICE_RESTORE_FUNC_CAP(name) \ 3011 func_caps->common_cap.name = cached_caps.name 3012 3013 /* restore cached values */ 3014 ICE_RESTORE_FUNC_CAP(valid_functions); 3015 ICE_RESTORE_FUNC_CAP(txq_first_id); 3016 ICE_RESTORE_FUNC_CAP(rxq_first_id); 3017 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 3018 ICE_RESTORE_FUNC_CAP(max_mtu); 3019 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 3020 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 3021 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 3022 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 3023 3024 /* one Tx and one Rx queue in safe mode */ 3025 func_caps->common_cap.num_rxq = 1; 3026 func_caps->common_cap.num_txq = 1; 3027 3028 /* two MSIX vectors, one for traffic and one for misc causes */ 3029 func_caps->common_cap.num_msix_vectors = 2; 3030 func_caps->guar_num_vsi = 1; 3031 3032 /* cache some dev_caps values that should be restored after memset */ 3033 cached_caps = dev_caps->common_cap; 3034 num_funcs = dev_caps->num_funcs; 3035 3036 /* unset dev capabilities */ 3037 memset(dev_caps, 0, sizeof(*dev_caps)); 3038 3039 #define ICE_RESTORE_DEV_CAP(name) \ 3040 dev_caps->common_cap.name = cached_caps.name 3041 3042 /* restore cached values */ 3043 ICE_RESTORE_DEV_CAP(valid_functions); 3044 ICE_RESTORE_DEV_CAP(txq_first_id); 3045 ICE_RESTORE_DEV_CAP(rxq_first_id); 3046 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 3047 ICE_RESTORE_DEV_CAP(max_mtu); 3048 ICE_RESTORE_DEV_CAP(nvm_unified_update); 3049 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 3050 
ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 3051 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 3052 dev_caps->num_funcs = num_funcs; 3053 3054 /* one Tx and one Rx queue per function in safe mode */ 3055 dev_caps->common_cap.num_rxq = num_funcs; 3056 dev_caps->common_cap.num_txq = num_funcs; 3057 3058 /* two MSIX vectors per function */ 3059 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 3060 } 3061 3062 /** 3063 * ice_get_caps - get info about the HW 3064 * @hw: pointer to the hardware structure 3065 */ 3066 int ice_get_caps(struct ice_hw *hw) 3067 { 3068 int status; 3069 3070 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3071 if (status) 3072 return status; 3073 3074 return ice_discover_func_caps(hw, &hw->func_caps); 3075 } 3076 3077 /** 3078 * ice_aq_manage_mac_write - manage MAC address write command 3079 * @hw: pointer to the HW struct 3080 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3081 * @flags: flags to control write behavior 3082 * @cd: pointer to command details structure or NULL 3083 * 3084 * This function is used to write MAC address to the NVM (0x0108). 3085 */ 3086 int 3087 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3088 struct ice_sq_cd *cd) 3089 { 3090 struct ice_aqc_manage_mac_write *cmd; 3091 struct ice_aq_desc desc; 3092 3093 cmd = &desc.params.mac_write; 3094 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3095 3096 cmd->flags = flags; 3097 ether_addr_copy(cmd->mac_addr, mac_addr); 3098 3099 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3100 } 3101 3102 /** 3103 * ice_aq_clear_pxe_mode 3104 * @hw: pointer to the HW struct 3105 * 3106 * Tell the firmware that the driver is taking over from PXE (0x0110). 3107 */ 3108 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3109 { 3110 struct ice_aq_desc desc; 3111 3112 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3113 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3114 3115 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3116 } 3117 3118 /** 3119 * ice_clear_pxe_mode - clear pxe operations mode 3120 * @hw: pointer to the HW struct 3121 * 3122 * Make sure all PXE mode settings are cleared, including things 3123 * like descriptor fetch/write-back mode. 3124 */ 3125 void ice_clear_pxe_mode(struct ice_hw *hw) 3126 { 3127 if (ice_check_sq_alive(hw, &hw->adminq)) 3128 ice_aq_clear_pxe_mode(hw); 3129 } 3130 3131 /** 3132 * ice_aq_set_port_params - set physical port parameters. 3133 * @pi: pointer to the port info struct 3134 * @double_vlan: if set double VLAN is enabled 3135 * @cd: pointer to command details structure or NULL 3136 * 3137 * Set Physical port parameters (0x0203) 3138 */ 3139 int 3140 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3141 struct ice_sq_cd *cd) 3142 3143 { 3144 struct ice_aqc_set_port_params *cmd; 3145 struct ice_hw *hw = pi->hw; 3146 struct ice_aq_desc desc; 3147 u16 cmd_flags = 0; 3148 3149 cmd = &desc.params.set_port_params; 3150 3151 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3152 if (double_vlan) 3153 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3154 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3155 3156 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3157 } 3158 3159 /** 3160 * ice_is_100m_speed_supported 3161 * @hw: pointer to the HW struct 3162 * 3163 * returns true if 100M speeds are supported by the device, 3164 * false otherwise. 
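*
* Illustrative use (editor's sketch, not a quote from the driver):
* callers such as the ethtool advertising paths can gate 100M link modes
* on this helper, e.g.
*
*	if (!ice_is_100m_speed_supported(hw))
*		... do not advertise 100M modes ...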
3165 */ 3166 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3167 { 3168 switch (hw->device_id) { 3169 case ICE_DEV_ID_E822C_SGMII: 3170 case ICE_DEV_ID_E822L_SGMII: 3171 case ICE_DEV_ID_E823L_1GBE: 3172 case ICE_DEV_ID_E823C_SGMII: 3173 return true; 3174 default: 3175 return false; 3176 } 3177 } 3178 3179 /** 3180 * ice_get_link_speed_based_on_phy_type - returns link speed 3181 * @phy_type_low: lower part of phy_type 3182 * @phy_type_high: higher part of phy_type 3183 * 3184 * This helper function will convert an entry in PHY type structure 3185 * [phy_type_low, phy_type_high] to its corresponding link speed. 3186 * Note: In the structure of [phy_type_low, phy_type_high], there should 3187 * be one bit set, as this function will convert one PHY type to its 3188 * speed. 3189 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3190 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3191 */ 3192 static u16 3193 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3194 { 3195 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3196 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3197 3198 switch (phy_type_low) { 3199 case ICE_PHY_TYPE_LOW_100BASE_TX: 3200 case ICE_PHY_TYPE_LOW_100M_SGMII: 3201 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3202 break; 3203 case ICE_PHY_TYPE_LOW_1000BASE_T: 3204 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3205 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3206 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3207 case ICE_PHY_TYPE_LOW_1G_SGMII: 3208 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3209 break; 3210 case ICE_PHY_TYPE_LOW_2500BASE_T: 3211 case ICE_PHY_TYPE_LOW_2500BASE_X: 3212 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3213 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3214 break; 3215 case ICE_PHY_TYPE_LOW_5GBASE_T: 3216 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3217 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3218 break; 3219 case ICE_PHY_TYPE_LOW_10GBASE_T: 3220 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3221 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3222 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3223 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3224 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3225 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3226 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3227 break; 3228 case ICE_PHY_TYPE_LOW_25GBASE_T: 3229 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3230 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3231 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3232 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3233 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3234 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3235 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3236 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3237 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3238 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3239 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3240 break; 3241 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3242 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3243 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3244 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3245 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3246 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3247 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3248 break; 3249 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3250 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3251 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3252 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3253 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3254 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3255 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3256 case ICE_PHY_TYPE_LOW_50G_AUI2: 3257 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3258 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3259 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3260 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3261 
case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
3262 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
3263 case ICE_PHY_TYPE_LOW_50G_AUI1:
3264 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
3265 break;
3266 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
3267 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
3268 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
3269 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
3270 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
3271 case ICE_PHY_TYPE_LOW_100G_CAUI4:
3272 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
3273 case ICE_PHY_TYPE_LOW_100G_AUI4:
3274 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
3275 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
3276 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
3277 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
3278 case ICE_PHY_TYPE_LOW_100GBASE_DR:
3279 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
3280 break;
3281 default:
3282 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
3283 break;
3284 }
3285
3286 switch (phy_type_high) {
3287 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
3288 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
3289 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
3290 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
3291 case ICE_PHY_TYPE_HIGH_100G_AUI2:
3292 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
3293 break;
3294 default:
3295 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
3296 break;
3297 }
3298
3299 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
3300 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3301 return ICE_AQ_LINK_SPEED_UNKNOWN;
3302 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3303 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
3304 return ICE_AQ_LINK_SPEED_UNKNOWN;
3305 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
3306 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
3307 return speed_phy_type_low;
3308 else
3309 return speed_phy_type_high;
3310 }
3311
3312 /**
3313 * ice_update_phy_type
3314 * @phy_type_low: pointer to the lower part of phy_type
3315 * @phy_type_high: pointer to the higher part of phy_type
3316 * @link_speeds_bitmap: targeted link speeds bitmap
3317 *
3318 * Note: the format of link_speeds_bitmap matches the link_speed field
3319 * of ice_aqc_get_link_status. The caller can pass in a
3320 * link_speeds_bitmap that includes multiple speeds.
3321 *
3322 * Each bit in the [phy_type_low, phy_type_high] pair represents a
3323 * certain link speed. This helper function turns on the bits in
3324 * [phy_type_low, phy_type_high] that correspond to the speeds set in
3325 * the link_speeds_bitmap input parameter.
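*
* Example (editor's sketch): to build a PHY type mask that covers 10G
* and 25G, a caller could do
*
*	u64 phy_low = 0, phy_high = 0;
*
*	ice_update_phy_type(&phy_low, &phy_high,
*			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
*
* and then feed phy_low/phy_high into the Set PHY config data.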
3326 */ 3327 void 3328 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3329 u16 link_speeds_bitmap) 3330 { 3331 u64 pt_high; 3332 u64 pt_low; 3333 int index; 3334 u16 speed; 3335 3336 /* We first check with low part of phy_type */ 3337 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3338 pt_low = BIT_ULL(index); 3339 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3340 3341 if (link_speeds_bitmap & speed) 3342 *phy_type_low |= BIT_ULL(index); 3343 } 3344 3345 /* We then check with high part of phy_type */ 3346 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3347 pt_high = BIT_ULL(index); 3348 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3349 3350 if (link_speeds_bitmap & speed) 3351 *phy_type_high |= BIT_ULL(index); 3352 } 3353 } 3354 3355 /** 3356 * ice_aq_set_phy_cfg 3357 * @hw: pointer to the HW struct 3358 * @pi: port info structure of the interested logical port 3359 * @cfg: structure with PHY configuration data to be set 3360 * @cd: pointer to command details structure or NULL 3361 * 3362 * Set the various PHY configuration parameters supported on the Port. 3363 * One or more of the Set PHY config parameters may be ignored in an MFP 3364 * mode as the PF may not have the privilege to set some of the PHY Config 3365 * parameters. This status will be indicated by the command response (0x0601). 3366 */ 3367 int 3368 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3369 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3370 { 3371 struct ice_aq_desc desc; 3372 int status; 3373 3374 if (!cfg) 3375 return -EINVAL; 3376 3377 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3378 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3379 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3380 cfg->caps); 3381 3382 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3383 } 3384 3385 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3386 desc.params.set_phy.lport_num = pi->lport; 3387 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3388 3389 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3390 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3391 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3392 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3393 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3394 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3395 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3396 cfg->low_power_ctrl_an); 3397 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3398 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3399 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3400 cfg->link_fec_opt); 3401 3402 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3403 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3404 status = 0; 3405 3406 if (!status) 3407 pi->phy.curr_user_phy_cfg = *cfg; 3408 3409 return status; 3410 } 3411 3412 /** 3413 * ice_update_link_info - update status of the HW network link 3414 * @pi: port info structure of the interested logical port 3415 */ 3416 int ice_update_link_info(struct ice_port_info *pi) 3417 { 3418 struct ice_link_status *li; 3419 int status; 3420 3421 if (!pi) 3422 return -EINVAL; 3423 3424 li = &pi->phy.link_info; 3425 3426 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3427 if (status) 3428 return status; 3429 3430 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3431 struct ice_aqc_get_phy_caps_data 
*pcaps; 3432 struct ice_hw *hw; 3433 3434 hw = pi->hw; 3435 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3436 GFP_KERNEL); 3437 if (!pcaps) 3438 return -ENOMEM; 3439 3440 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3441 pcaps, NULL); 3442 3443 devm_kfree(ice_hw_to_dev(hw), pcaps); 3444 } 3445 3446 return status; 3447 } 3448 3449 /** 3450 * ice_cache_phy_user_req 3451 * @pi: port information structure 3452 * @cache_data: PHY logging data 3453 * @cache_mode: PHY logging mode 3454 * 3455 * Log the user request on (FC, FEC, SPEED) for later use. 3456 */ 3457 static void 3458 ice_cache_phy_user_req(struct ice_port_info *pi, 3459 struct ice_phy_cache_mode_data cache_data, 3460 enum ice_phy_cache_mode cache_mode) 3461 { 3462 if (!pi) 3463 return; 3464 3465 switch (cache_mode) { 3466 case ICE_FC_MODE: 3467 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3468 break; 3469 case ICE_SPEED_MODE: 3470 pi->phy.curr_user_speed_req = 3471 cache_data.data.curr_user_speed_req; 3472 break; 3473 case ICE_FEC_MODE: 3474 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3475 break; 3476 default: 3477 break; 3478 } 3479 } 3480 3481 /** 3482 * ice_caps_to_fc_mode 3483 * @caps: PHY capabilities 3484 * 3485 * Convert PHY FC capabilities to ice FC mode 3486 */ 3487 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3488 { 3489 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3490 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3491 return ICE_FC_FULL; 3492 3493 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3494 return ICE_FC_TX_PAUSE; 3495 3496 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3497 return ICE_FC_RX_PAUSE; 3498 3499 return ICE_FC_NONE; 3500 } 3501 3502 /** 3503 * ice_caps_to_fec_mode 3504 * @caps: PHY capabilities 3505 * @fec_options: Link FEC options 3506 * 3507 * Convert PHY FEC capabilities to ice FEC mode 3508 */ 3509 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3510 { 3511 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3512 return ICE_FEC_AUTO; 3513 3514 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3515 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3516 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3517 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3518 return ICE_FEC_BASER; 3519 3520 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3521 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3522 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3523 return ICE_FEC_RS; 3524 3525 return ICE_FEC_NONE; 3526 } 3527 3528 /** 3529 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3530 * @pi: port information structure 3531 * @cfg: PHY configuration data to set FC mode 3532 * @req_mode: FC mode to configure 3533 */ 3534 int 3535 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3536 enum ice_fc_mode req_mode) 3537 { 3538 struct ice_phy_cache_mode_data cache_data; 3539 u8 pause_mask = 0x0; 3540 3541 if (!pi || !cfg) 3542 return -EINVAL; 3543 3544 switch (req_mode) { 3545 case ICE_FC_FULL: 3546 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3547 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3548 break; 3549 case ICE_FC_RX_PAUSE: 3550 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3551 break; 3552 case ICE_FC_TX_PAUSE: 3553 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3554 break; 3555 default: 3556 break; 3557 } 3558 3559 /* clear the old pause settings */ 3560 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3561 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3562 3563 /* set the new capabilities */ 3564 cfg->caps |= pause_mask; 3565 3566 /* Cache user FC request */ 3567 cache_data.data.curr_user_fc_req = req_mode; 

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
int ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	int status;

	if (!pi)
		return -EINVAL;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return -ENOMEM;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Cache the user-requested settings (FC, FEC, speed) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return -EINVAL;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !aq_failures)
		return -EINVAL;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}
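
/* Illustrative sketch (not part of the driver): ice_set_fc() reports which
 * step failed through @aq_failures, so a caller can log a precise reason.
 * The wrapper below is hypothetical; the ICE_SET_FC_AQ_FAIL_* values are
 * the real step codes used above.
 */
static int example_apply_fc(struct ice_port_info *pi)
{
	u8 aq_failures = 0;
	int status;

	pi->fc.req_mode = ICE_FC_FULL;	/* request symmetric pause */
	status = ice_set_fc(pi, &aq_failures, true);
	if (status)
		pr_warn("FC setup failed at step %u, err %d\n",
			aq_failures, status);
	return status;
}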

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	struct ice_hw *hw;
	int status;

	if (!pi || !cfg)
		return -EINVAL;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return -ENOMEM;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear the RS bits; AND in the BASE-R ability bits and OR
		 * in the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
				     ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
				     ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear the BASE-R bits; AND in the RS ability bits and OR
		 * in the request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
				     ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND in the auto-FEC bit and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv = { 0 };

		status = ice_get_link_default_override(&tlv, pi);
		if (status)
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}
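
/* Illustrative sketch (not part of the driver): forcing RS-FEC, similar in
 * spirit to the ethtool FEC path. The helper below is hypothetical; it
 * reuses the last user PHY request cached by ice_aq_set_phy_cfg() as the
 * starting point.
 */
static int example_force_rs_fec(struct ice_port_info *pi)
{
	struct ice_aqc_set_phy_cfg_data cfg;
	int status;

	cfg = pi->phy.curr_user_phy_cfg;	/* start from the last request */

	status = ice_cfg_phy_fec(pi, &cfg, ICE_FEC_RS);
	if (status)
		return status;

	/* apply the change without waiting for a manual link restart */
	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
	return ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
}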

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * On success, *link_up is true if the link is up and false if it is down.
 * The value of *link_up is not valid if the return status is non-zero. As
 * a result of this call, link status reporting becomes enabled.
 */
int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	int status = 0;

	if (!pi || !link_up)
		return -EINVAL;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
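
/* Illustrative sketch (not part of the driver): restarting autonegotiation
 * so that previously staged PHY settings take effect, but only when the
 * link is currently up. Hypothetical helper.
 */
static int example_restart_an_if_up(struct ice_port_info *pi)
{
	bool link_up;
	int status;

	status = ice_get_link_status(pi, &link_up);
	if (status)
		return status;

	if (!link_up)
		return 0;

	/* keep the link enabled while renegotiating */
	return ice_aq_set_link_restart_an(pi, true, NULL);
}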

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
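
/* Illustrative sketch (not part of the driver): enabling only link up/down
 * event reporting. ICE_AQ_LINK_EVENT_UPDOWN is a real event bit; note that
 * a set bit in the mask *suppresses* the corresponding event, hence the
 * inversion (the same pattern the driver's link-event init path uses).
 * Hypothetical helper.
 */
static int example_enable_link_events(struct ice_port_info *pi)
{
	u16 mask = ~((u16)ICE_AQ_LINK_EVENT_UPDOWN);

	return ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL);
}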

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the HW struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *                output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses port owned by the PF instead of lport;
 *               when the PF owns more than one port it must be true
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
int
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	int status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	if (lport_valid)
		cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
				  cmd->port_options_count);
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
					 cmd->port_options);
	if (*active_option_valid) {
		*active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
					       cmd->port_options);
		if (*active_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
					  cmd->pending_port_option_status);
	if (*pending_option_valid) {
		*pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
						cmd->pending_port_option_status);
		if (*pending_option_idx > (*option_count - 1))
			return -EIO;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
					   options[i].pmd);
		options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
						      options[i].max_lane_speed);
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return 0;
}
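
/* Illustrative sketch (not part of the driver): dumping the port option
 * table for a port. Hypothetical helper; ICE_AQC_PORT_OPT_MAX bounds the
 * number of options firmware may return, so it satisfies the buffer-size
 * check above.
 */
static void example_dump_port_options(struct ice_hw *hw, u8 lport)
{
	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
	u8 count = ICE_AQC_PORT_OPT_MAX;
	bool active_valid, pending_valid;
	u8 active_idx, pending_idx;

	if (ice_aq_get_port_options(hw, options, &count, lport, true,
				    &active_idx, &active_valid,
				    &pending_idx, &pending_valid))
		return;

	if (active_valid)
		ice_debug(hw, ICE_DBG_PHY, "active option %u of %u\n",
			  active_idx, count);
}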

/**
 * ice_aq_set_port_option
 * @hw: pointer to the HW struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses port owned by the PF instead of lport;
 *               when the PF owns more than one port it must be true
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
		return -EINVAL;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	if (lport_valid)
		cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0 = topo default)
 * @mem_addr: I2C offset. Lower 8 bits are the address; upper 8 bits must be
 *            zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: false for read, true for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	int status;

	if (!data || (mem_addr & 0xff00))
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}
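
/* Illustrative sketch (not part of the driver): reading the SFF-8024
 * identifier byte (offset 0, lower page) of a plugged module at the
 * conventional I2C address 0xA0. Hypothetical helper; lport 0 with bit 8
 * clear lets firmware use the PF's own port.
 */
static int example_read_module_id(struct ice_hw *hw, u8 *id)
{
	return ice_aq_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id, 1, false, NULL);
}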

static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
{
	switch (type) {
	case ICE_LUT_VSI:
		return ICE_LUT_VSI_SIZE;
	case ICE_LUT_GLOBAL:
		return ICE_LUT_GLOBAL_SIZE;
	case ICE_LUT_PF:
		return ICE_LUT_PF_SIZE;
	}
	WARN_ONCE(1, "incorrect type passed");
	return ICE_LUT_VSI_SIZE;
}

static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
{
	switch (size) {
	case ICE_LUT_VSI_SIZE:
		return ICE_AQC_LUT_SIZE_SMALL;
	case ICE_LUT_GLOBAL_SIZE:
		return ICE_AQC_LUT_SIZE_512;
	case ICE_LUT_PF_SIZE:
		return ICE_AQC_LUT_SIZE_2K;
	}
	WARN_ONCE(1, "incorrect size passed");
	return 0;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
 */
static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw,
			 struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
	enum ice_lut_type lut_type = params->lut_type;
	struct ice_aqc_get_set_rss_lut *desc_params;
	enum ice_aqc_lut_flags flags;
	enum ice_lut_size lut_size;
	struct ice_aq_desc desc;
	u8 *lut = params->lut;

	if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	lut_size = ice_lut_type_to_size(lut_type);
	if (lut_size > params->lut_size)
		return -EINVAL;
	else if (set && lut_size != params->lut_size)
		return -EINVAL;

	opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
	if (set)
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	desc_params = &desc.params.get_set_rss_lut;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	if (lut_type == ICE_LUT_GLOBAL)
		glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
					  params->global_lut_id);

	flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
	desc_params->flags = cpu_to_le16(flags);

	return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
 *
 * get the RSS lookup table, PF or VSI type
 */
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
	return __ice_aq_get_set_rss_lut(hw, get_params, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
 *
 * set the RSS lookup table, PF or VSI type
 */
int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
	return __ice_aq_get_set_rss_lut(hw, set_params, true);
}
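
/* Illustrative sketch (not part of the driver): programming a round-robin
 * VSI LUT that spreads entries across @num_rxqs receive queues. The params
 * structure and the ICE_LUT_VSI* constants are real; the helper itself is
 * hypothetical.
 */
static int example_set_round_robin_lut(struct ice_hw *hw, u16 vsi_handle,
				       u8 num_rxqs)
{
	struct ice_aq_get_set_rss_lut_params params = {};
	u8 lut[ICE_LUT_VSI_SIZE];
	int i;

	if (!num_rxqs)
		return -EINVAL;

	for (i = 0; i < ICE_LUT_VSI_SIZE; i++)
		lut[i] = i % num_rxqs;	/* spread entries across queues */

	params.vsi_handle = vsi_handle;
	params.lut_type = ICE_LUT_VSI;
	params.lut_size = ICE_LUT_VSI_SIZE;
	params.lut = lut;

	return ice_aq_set_rss_lut(hw, &params);
}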

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
			 struct ice_aqc_get_set_rss_keys *key, bool set)
{
	struct ice_aqc_get_set_rss_key *desc_params;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	desc_params = &desc.params.get_set_rss_key;
	desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return -EINVAL;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}
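
/* Illustrative sketch (not part of the driver): setting a random RSS key,
 * as an ethtool-style path might. Hypothetical helper; assumes
 * <linux/random.h> for get_random_bytes(). The key struct carries both the
 * standard and the extended hash key bytes of the AQ layout.
 */
static int example_set_random_rss_key(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_aqc_get_set_rss_keys keys;

	get_random_bytes(keys.standard_rss_key,
			 sizeof(keys.standard_rss_key));
	memset(keys.extended_hash_key, 0, sizeof(keys.extended_hash_key));

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}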

/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue, initialize the following as part of
 * the Tx queue context: the completion queue ID (if the queue uses a
 * completion queue), the quanta profile, the cache profile and the packet
 * shaper profile.
 *
 * After the add Tx LAN queue AQ command completes, interrupts should be
 * associated with specific queues. Associating a Tx queue with a doorbell
 * queue is not part of the add LAN Tx queue flow.
 */
static int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qg_list; i < num_qgrps; i++) {
		sum_size += struct_size(list, txqs, list->num_txqs);
		list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
						      list->num_txqs);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	u16 i, sz = 0;
	int status;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return -EINVAL;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return -EINVAL;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

/**
 * ice_aq_cfg_lan_txq
 * @hw: pointer to the hardware structure
 * @buf: buffer for command
 * @buf_size: size of buffer in bytes
 * @num_qs: number of queues being configured
 * @oldport: origination lport
 * @newport: destination lport
 * @cd: pointer to command details structure or NULL
 *
 * Move/Configure LAN Tx queue (0x0C32)
 *
 * There is a better AQ command for moving nodes, so this wrapper only
 * implements the configure operation.
 */
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
		   u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_cfg_txqs *cmd;
	struct ice_aq_desc desc;
	int status;

	cmd = &desc.params.cfg_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	if (!buf)
		return -EINVAL;

	cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
	cmd->num_qs = num_qs;
	cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
	cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
			      ICE_AQC_Q_CFG_DST_PRT_M;
	cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
			ICE_AQC_Q_CFG_TIMEOUT_M;
	cmd->blocked_cgds = 0;

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
			  hw->adminq.sq_last_status);
	return status;
}

/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 5 bits, so the shift would do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instruction's shift count
	 * is masked to 6 bits, so the shift would do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
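
/* Illustrative sketch (not part of the driver): packing a Tx LAN queue
 * context with ice_set_ctx(). This assumes ice_tlan_ctx_info, the field
 * descriptor table for struct ice_tlan_ctx declared in ice_common.h; the
 * helper below is hypothetical.
 */
static void example_pack_tlan_ctx(struct ice_hw *hw, struct ice_tlan_ctx *ctx,
				  struct ice_aqc_add_tx_qgrp *qg_buf)
{
	/* serialize the unpacked context into the firmware's packed layout */
	ice_set_ctx(hw, (u8 *)ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);
}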

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	int status;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return -ENOSPC;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = -EINVAL;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = -EINVAL;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
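
/* Illustrative sketch (not part of the driver): enabling a single Tx queue.
 * The helper is hypothetical and skips queue context setup; it shows the
 * single-entry group buffer sizing (which ice_aq_add_lan_txq() validates
 * exactly) and the call itself.
 */
static int example_ena_one_txq(struct ice_port_info *pi, u16 vsi_handle,
			       u8 tc, u16 q_handle, u16 hw_txq_id)
{
	DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
	u16 buf_size = __struct_size(qg_buf);

	qg_buf->num_txqs = 1;
	qg_buf->txqs[0].txq_id = cpu_to_le16(hw_txq_id);
	/* a real caller packs the queue context into txqs[0].txq_ctx here */

	return ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
			       buf_size, NULL);
}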

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 i, buf_size = __struct_size(qg_list);
	struct ice_q_ctx *q_ctx;
	int status = -ENOENT;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queues are already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return -EIO;
	}

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
		q_ctx->q_teid = ICE_INVAL_TEID;
	}
	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	int status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return -EINVAL;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

/**
 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_rdmaqs: max RDMA queues array per TC
 *
 * This function adds/updates the VSI RDMA queues per TC.
 */
int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}
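
/* Illustrative sketch (not part of the driver): sizing the scheduler for a
 * single-TC VSI. The per-TC array index is the TC number and the bitmap
 * selects which TCs are enabled. Hypothetical helper;
 * ICE_MAX_TRAFFIC_CLASS is the real array bound.
 */
static int example_cfg_lan_queues(struct ice_port_info *pi, u16 vsi_handle,
				  u16 num_txqs)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };

	/* all queues on TC 0; the remaining TCs stay empty */
	max_lanqs[0] = num_txqs;

	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}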

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to RDMA Qset
 * @num_qsets: number of RDMA Qsets
 * @qset_teid: pointer to Qset node TEIDs
 *
 * This function adds RDMA Qsets
 */
int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	struct ice_hw *hw;
	u16 i, buf_size;
	int ret;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	mutex_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		ret = -EINVAL;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = cpu_to_le16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	}
	ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (ret) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					 &node, NULL);
		if (ret)
			break;
		qset_teid[i] = le32_to_cpu(node.node_teid);
	}
rdma_error_exit:
	mutex_unlock(&pi->sched_lock);
	kfree(buf);
	return ret;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 qg_size = __struct_size(qg_list);
	struct ice_hw *hw;
	int status = 0;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_aq_get_cgu_abilities - get cgu abilities
 * @hw: pointer to the HW struct
 * @abilities: CGU abilities
 *
 * Get CGU abilities (0x0C61)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
			 struct ice_aqc_get_cgu_abilities *abilities)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
	return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
}

/**
 * ice_aq_set_input_pin_cfg - set input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Set CGU input config (0x0C62)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
			 u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_input_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
	cmd = &desc.params.set_cgu_input_config;
	cmd->input_idx = input_idx;
	cmd->flags1 = flags1;
	cmd->flags2 = flags2;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_input_pin_cfg - get input pin config
 * @hw: pointer to the HW struct
 * @input_idx: Input index
 * @status: Pin status
 * @type: Pin type
 * @flags1: Input flags
 * @flags2: Input flags
 * @freq: Frequency in Hz
 * @phase_delay: Delay in ps
 *
 * Get CGU input config (0x0C63)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
			 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
	struct ice_aqc_get_cgu_input_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
	cmd = &desc.params.get_cgu_input_config;
	cmd->input_idx = input_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (status)
			*status = cmd->status;
		if (type)
			*type = cmd->type;
		if (flags1)
			*flags1 = cmd->flags1;
		if (flags2)
			*flags2 = cmd->flags2;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (phase_delay)
			*phase_delay = le32_to_cpu(cmd->phase_delay);
	}

	return ret;
}

/**
 * ice_aq_set_output_pin_cfg - set output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Index of DPLL block
 * @freq: Output frequency
 * @phase_delay: Output phase compensation
 *
 * Set CGU output config (0x0C64)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
			  u8 src_sel, u32 freq, s32 phase_delay)
{
	struct ice_aqc_set_cgu_output_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
	cmd = &desc.params.set_cgu_output_config;
	cmd->output_idx = output_idx;
	cmd->flags = flags;
	cmd->src_sel = src_sel;
	cmd->freq = cpu_to_le32(freq);
	cmd->phase_delay = cpu_to_le32(phase_delay);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_output_pin_cfg - get output pin config
 * @hw: pointer to the HW struct
 * @output_idx: Output index
 * @flags: Output flags
 * @src_sel: Internal DPLL source
 * @freq: Output frequency
 * @src_freq: Source frequency
 *
 * Get CGU output config (0x0C65)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
			  u8 *src_sel, u32 *freq, u32 *src_freq)
{
	struct ice_aqc_get_cgu_output_config *cmd;
	struct ice_aq_desc desc;
	int ret;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
	cmd = &desc.params.get_cgu_output_config;
	cmd->output_idx = output_idx;

	ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!ret) {
		if (flags)
			*flags = cmd->flags;
		if (src_sel)
			*src_sel = cmd->src_sel;
		if (freq)
			*freq = le32_to_cpu(cmd->freq);
		if (src_freq)
			*src_freq = le32_to_cpu(cmd->src_freq);
	}

	return ret;
}

/**
 * ice_aq_get_cgu_dpll_status - get dpll status
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: current DPLL config
 * @dpll_state: current DPLL state
 * @phase_offset: Phase offset in ns
 * @eec_mode: EEC_mode
 *
 * Get CGU DPLL status (0x0C66)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
			   u8 *dpll_state, u8 *config, s64 *phase_offset,
			   u8 *eec_mode)
{
	struct ice_aqc_get_cgu_dpll_status *cmd;
	const s64 nsec_per_psec = 1000LL;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
	cmd = &desc.params.get_cgu_dpll_status;
	cmd->dpll_num = dpll_num;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*ref_state = cmd->ref_state;
		*dpll_state = cmd->dpll_state;
		*config = cmd->config;
		*phase_offset = le32_to_cpu(cmd->phase_offset_h);
		*phase_offset <<= 32;
		*phase_offset += le32_to_cpu(cmd->phase_offset_l);
		*phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
					  nsec_per_psec);
		*eec_mode = cmd->eec_mode;
	}

	return status;
}
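
/* Illustrative sketch (not part of the driver): polling DPLL 0 and logging
 * its raw state. Hypothetical helper; the wrapper above already converts
 * the 48-bit phase offset from picoseconds to nanoseconds, so the value is
 * logged as ns. ICE_DBG_PTP is assumed as the debug mask here.
 */
static void example_log_dpll_state(struct ice_hw *hw)
{
	u8 ref_state, dpll_state, config, eec_mode;
	s64 phase_offset_ns;

	if (ice_aq_get_cgu_dpll_status(hw, 0, &ref_state, &dpll_state,
				       &config, &phase_offset_ns, &eec_mode))
		return;

	ice_debug(hw, ICE_DBG_PTP, "dpll0 state 0x%x, offset %lld ns\n",
		  dpll_state, phase_offset_ns);
}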

/**
 * ice_aq_set_cgu_dpll_config - set dpll config
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_state: Reference clock state
 * @config: DPLL config
 * @eec_mode: EEC mode
 *
 * Set CGU DPLL config (0x0C67)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
			   u8 config, u8 eec_mode)
{
	struct ice_aqc_set_cgu_dpll_config *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
	cmd = &desc.params.set_cgu_dpll_config;
	cmd->dpll_num = dpll_num;
	cmd->ref_state = ref_state;
	cmd->config = config;
	cmd->eec_mode = eec_mode;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_cgu_ref_prio - set input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_priority: Reference input priority
 *
 * Set CGU reference priority (0x0C68)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 ref_priority)
{
	struct ice_aqc_set_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
	cmd = &desc.params.set_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;
	cmd->ref_priority = ref_priority;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}
/**
 * ice_aq_get_cgu_ref_prio - get input reference priority
 * @hw: pointer to the HW struct
 * @dpll_num: DPLL index
 * @ref_idx: Reference pin index
 * @ref_prio: Reference input priority
 *
 * Get CGU reference priority (0x0C69)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
			u8 *ref_prio)
{
	struct ice_aqc_get_cgu_ref_prio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
	cmd = &desc.params.get_cgu_ref_prio;
	cmd->dpll_num = dpll_num;
	cmd->ref_idx = ref_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*ref_prio = cmd->ref_priority;

	return status;
}

/**
 * ice_aq_get_cgu_info - get CGU info
 * @hw: pointer to the HW struct
 * @cgu_id: CGU ID
 * @cgu_cfg_ver: CGU config version
 * @cgu_fw_ver: CGU firmware version
 *
 * Get CGU info (0x0C6A)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
		    u32 *cgu_fw_ver)
{
	struct ice_aqc_get_cgu_info *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
	cmd = &desc.params.get_cgu_info;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*cgu_id = le32_to_cpu(cmd->cgu_id);
		*cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
		*cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
	}

	return status;
}

/**
 * ice_aq_set_phy_rec_clk_out - set RCLK phy out
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @enable: GPIO state to be applied
 * @freq: PHY output frequency
 *
 * Set phy recovered clock as reference (0x0630)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
			   u32 *freq)
{
	struct ice_aqc_set_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
	cmd = &desc.params.set_phy_rec_clk_out;
	cmd->phy_output = phy_output;
	cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
	cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
	cmd->freq = cpu_to_le32(*freq);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*freq = le32_to_cpu(cmd->freq);

	return status;
}
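/* Illustrative sketch (not part of the driver): enabling a recovered clock
 * output with the helper above. Note that @freq is in/out: the requested
 * frequency is sent to FW and the frequency actually programmed is read back.
 * The pin index and frequency are made-up example values.
 */
static int __maybe_unused ice_example_enable_rclk(struct ice_hw *hw)
{
	u32 freq = 156250000;	/* request 156.25 MHz */
	int err;

	err = ice_aq_set_phy_rec_clk_out(hw, 0, true, &freq);
	if (!err)
		ice_debug(hw, ICE_DBG_PHY, "recovered clock running at %u Hz\n",
			  freq);

	return err;
}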
/**
 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
 * @hw: pointer to the HW struct
 * @phy_output: PHY reference clock output pin
 * @port_num: Port number
 * @flags: PHY flags
 * @node_handle: Node handle of the PHY output
 *
 * Get PHY recovered clock output info (0x0631)
 * Return: 0 on success or negative value on failure.
 */
int
ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
			   u8 *flags, u16 *node_handle)
{
	struct ice_aqc_get_phy_rec_clk_out *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
	cmd = &desc.params.get_phy_rec_clk_out;
	cmd->phy_output = *phy_output;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status) {
		*phy_output = cmd->phy_output;
		if (port_num)
			*port_num = cmd->port_num;
		if (flags)
			*flags = cmd->flags;
		if (node_handle)
			*node_handle = le16_to_cpu(cmd->node_handle);
	}

	return status;
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static int ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from the replay filter list head if there are
	 * any
	 */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list, which
	 * is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	int status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return -EINVAL;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from the replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
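/* Illustrative sketch (not part of the driver): the 40-bit roll-over handling
 * in ice_stat_update40() above, isolated with concrete numbers. If the
 * previous read was near the top of the 40-bit range and the new read is
 * small, the counter wrapped, so the delta must span the wrap point.
 */
static u64 __maybe_unused ice_example_stat40_delta(u64 prev, u64 cur)
{
	/* e.g. prev = 0xFFFFFFFF00, cur = 0x100:
	 * delta = (0x100 + BIT_ULL(40)) - 0xFFFFFFFF00 = 0x200
	 */
	if (cur >= prev)
		return cur - prev;

	return (cur + BIT_ULL(40)) - prev;
}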
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	int status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_aq_read_i2c - read bytes from an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start,
 *	    bits [6:5] - data offset size,
 *	    bit [4] - I2C address type,
 *	    bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return -EINVAL;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		/* copy the requested number of bytes out of the response */
		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}
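/* Illustrative sketch (not part of the driver): composing the @params byte
 * for ice_aq_read_i2c() above, following the bit layout documented in its
 * kernel-doc (bit [7] repeated start, bits [3:0] data size). Only
 * ICE_AQC_I2C_DATA_SIZE_M is taken from the driver; the repeated-start bit is
 * written as a plain BIT(7) here to stay self-contained.
 */
static u8 __maybe_unused ice_example_i2c_read_params(u8 data_size)
{
	u8 params = 0;

	params |= BIT(7);	/* use repeated start, per documented layout */
	params |= FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, data_size);

	return params;
}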
/**
 * ice_aq_write_i2c - write bytes to an I2C device
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type,
 *	    bits [3:0] - data size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 *
 * Return:
 * * 0 - Successful write to the i2c device
 * * -EINVAL - Data size greater than 4 bytes
 * * -EIO - FW error
 */
int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, const u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);

	/* data_size limited to 4 */
	if (data_size > 4)
		return -EINVAL;

	cmd->i2c_bus_addr = cpu_to_le16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	memcpy(cmd->i2c_data, data, data_size);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio - set GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
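/* Illustrative sketch (not part of the driver): a 2-byte write using
 * ice_aq_write_i2c() above. The bus address, I2C offset and payload are
 * made-up example values; the params byte encodes only the data size.
 */
static int __maybe_unused
ice_example_i2c_write16(struct ice_hw *hw, struct ice_aqc_link_topo_addr addr)
{
	u8 buf[2] = { 0x12, 0x34 };
	u8 params = FIELD_PREP(ICE_AQC_I2C_DATA_SIZE_M, sizeof(buf));

	return ice_aq_write_i2c(hw, addr, 0x50, cpu_to_le16(0x0100), params,
				buf, NULL);
}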
/**
 * ice_aq_get_gpio - get GPIO pin state
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	int status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return 0;
}

/**
 * ice_is_fw_api_min_ver - check firmware API minimum version
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API version is at least maj.min.patch.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override - check if FW supports link override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override - get link default override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	int status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy types (low).\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy types (high).\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}
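/* Illustrative sketch (not part of the driver): the word-assembly pattern used
 * by ice_get_link_default_override() above. Four 16-bit NVM words are shifted
 * into place to rebuild a 64-bit PHY type mask: words[0] lands in bits [15:0],
 * words[3] in bits [63:48].
 */
static u64 __maybe_unused ice_example_words_to_phy_type(const u16 words[4])
{
	u64 phy_type = 0;
	int i;

	for (i = 0; i < 4; i++)
		phy_type |= (u64)words[i] << (i * 16);

	return phy_type;
}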
/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return -EINVAL;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add the filter, false to remove it
 */
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
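/* Illustrative sketch (not part of the driver): installing the LLDP Rx filter
 * only when the FW supports it, using the two helpers above. @vsi_num would be
 * the absolute HW index of the target VSI.
 */
static int __maybe_unused
ice_example_lldp_fltr_on(struct ice_hw *hw, u16 vsi_num)
{
	if (!ice_fw_supports_lldp_fltr_ctrl(hw))
		return -EOPNOTSUPP;

	return ice_lldp_fltr_add_remove(hw, vsi_num, true);
}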
/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg - check if FW supports report default config
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index into the following array matches a speed bit from the set of AQ
 * returned link speeds, covering 10 Mb/s (BIT(0)) up to 200 Gb/s (BIT(11))
 * and excluding ICE_AQ_LINK_SPEED_UNKNOWN, which is BIT(15). The link_speed
 * returned by the firmware is a 16 bit value, and the array is indexed by
 * [fls(speed) - 1].
 */
static const u32 ice_aq_to_link_speed[] = {
	SPEED_10,	/* BIT(0) */
	SPEED_100,
	SPEED_1000,
	SPEED_2500,
	SPEED_5000,
	SPEED_10000,
	SPEED_20000,
	SPEED_25000,
	SPEED_40000,
	SPEED_50000,
	SPEED_100000,	/* BIT(10) */
	SPEED_200000,
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return 0;

	return ice_aq_to_link_speed[index];
}
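/* Illustrative sketch (not part of the driver): mapping an AQ speed bitmask to
 * an integer speed via ice_get_link_speed() above. For example,
 * ICE_AQ_LINK_SPEED_25GB is BIT(7), so fls() returns 8 and index 7 selects
 * SPEED_25000 from the table.
 */
static u32 __maybe_unused ice_example_aq_speed_to_mbps(u16 aq_speed)
{
	if (!aq_speed || aq_speed == ICE_AQ_LINK_SPEED_UNKNOWN)
		return 0;

	return ice_get_link_speed(fls(aq_speed) - 1);
}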