1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2018-2023, Intel Corporation. */ 3 4 #include "ice_common.h" 5 #include "ice_sched.h" 6 #include "ice_adminq_cmd.h" 7 #include "ice_flow.h" 8 #include "ice_ptp_hw.h" 9 10 #define ICE_PF_RESET_WAIT_COUNT 300 11 #define ICE_MAX_NETLIST_SIZE 10 12 13 static const char * const ice_link_mode_str_low[] = { 14 [0] = "100BASE_TX", 15 [1] = "100M_SGMII", 16 [2] = "1000BASE_T", 17 [3] = "1000BASE_SX", 18 [4] = "1000BASE_LX", 19 [5] = "1000BASE_KX", 20 [6] = "1G_SGMII", 21 [7] = "2500BASE_T", 22 [8] = "2500BASE_X", 23 [9] = "2500BASE_KX", 24 [10] = "5GBASE_T", 25 [11] = "5GBASE_KR", 26 [12] = "10GBASE_T", 27 [13] = "10G_SFI_DA", 28 [14] = "10GBASE_SR", 29 [15] = "10GBASE_LR", 30 [16] = "10GBASE_KR_CR1", 31 [17] = "10G_SFI_AOC_ACC", 32 [18] = "10G_SFI_C2C", 33 [19] = "25GBASE_T", 34 [20] = "25GBASE_CR", 35 [21] = "25GBASE_CR_S", 36 [22] = "25GBASE_CR1", 37 [23] = "25GBASE_SR", 38 [24] = "25GBASE_LR", 39 [25] = "25GBASE_KR", 40 [26] = "25GBASE_KR_S", 41 [27] = "25GBASE_KR1", 42 [28] = "25G_AUI_AOC_ACC", 43 [29] = "25G_AUI_C2C", 44 [30] = "40GBASE_CR4", 45 [31] = "40GBASE_SR4", 46 [32] = "40GBASE_LR4", 47 [33] = "40GBASE_KR4", 48 [34] = "40G_XLAUI_AOC_ACC", 49 [35] = "40G_XLAUI", 50 [36] = "50GBASE_CR2", 51 [37] = "50GBASE_SR2", 52 [38] = "50GBASE_LR2", 53 [39] = "50GBASE_KR2", 54 [40] = "50G_LAUI2_AOC_ACC", 55 [41] = "50G_LAUI2", 56 [42] = "50G_AUI2_AOC_ACC", 57 [43] = "50G_AUI2", 58 [44] = "50GBASE_CP", 59 [45] = "50GBASE_SR", 60 [46] = "50GBASE_FR", 61 [47] = "50GBASE_LR", 62 [48] = "50GBASE_KR_PAM4", 63 [49] = "50G_AUI1_AOC_ACC", 64 [50] = "50G_AUI1", 65 [51] = "100GBASE_CR4", 66 [52] = "100GBASE_SR4", 67 [53] = "100GBASE_LR4", 68 [54] = "100GBASE_KR4", 69 [55] = "100G_CAUI4_AOC_ACC", 70 [56] = "100G_CAUI4", 71 [57] = "100G_AUI4_AOC_ACC", 72 [58] = "100G_AUI4", 73 [59] = "100GBASE_CR_PAM4", 74 [60] = "100GBASE_KR_PAM4", 75 [61] = "100GBASE_CP2", 76 [62] = "100GBASE_SR2", 77 [63] = "100GBASE_DR", 78 }; 79 80 static const char * const ice_link_mode_str_high[] = { 81 [0] = "100GBASE_KR2_PAM4", 82 [1] = "100G_CAUI2_AOC_ACC", 83 [2] = "100G_CAUI2", 84 [3] = "100G_AUI2_AOC_ACC", 85 [4] = "100G_AUI2", 86 }; 87 88 /** 89 * ice_dump_phy_type - helper function to dump phy_type 90 * @hw: pointer to the HW structure 91 * @low: 64 bit value for phy_type_low 92 * @high: 64 bit value for phy_type_high 93 * @prefix: prefix string to differentiate multiple dumps 94 */ 95 static void 96 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix) 97 { 98 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low); 99 100 for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) { 101 if (low & BIT_ULL(i)) 102 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", 103 prefix, i, ice_link_mode_str_low[i]); 104 } 105 106 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high); 107 108 for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) { 109 if (high & BIT_ULL(i)) 110 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n", 111 prefix, i, ice_link_mode_str_high[i]); 112 } 113 } 114 115 /** 116 * ice_set_mac_type - Sets MAC type 117 * @hw: pointer to the HW structure 118 * 119 * This function sets the MAC type of the adapter based on the 120 * vendor ID and device ID stored in the HW structure. 121 */ 122 static int ice_set_mac_type(struct ice_hw *hw) 123 { 124 if (hw->vendor_id != PCI_VENDOR_ID_INTEL) 125 return -ENODEV; 126 127 switch (hw->device_id) { 128 case ICE_DEV_ID_E810C_BACKPLANE: 129 case ICE_DEV_ID_E810C_QSFP: 130 case ICE_DEV_ID_E810C_SFP: 131 case ICE_DEV_ID_E810_XXV_BACKPLANE: 132 case ICE_DEV_ID_E810_XXV_QSFP: 133 case ICE_DEV_ID_E810_XXV_SFP: 134 hw->mac_type = ICE_MAC_E810; 135 break; 136 case ICE_DEV_ID_E823C_10G_BASE_T: 137 case ICE_DEV_ID_E823C_BACKPLANE: 138 case ICE_DEV_ID_E823C_QSFP: 139 case ICE_DEV_ID_E823C_SFP: 140 case ICE_DEV_ID_E823C_SGMII: 141 case ICE_DEV_ID_E822C_10G_BASE_T: 142 case ICE_DEV_ID_E822C_BACKPLANE: 143 case ICE_DEV_ID_E822C_QSFP: 144 case ICE_DEV_ID_E822C_SFP: 145 case ICE_DEV_ID_E822C_SGMII: 146 case ICE_DEV_ID_E822L_10G_BASE_T: 147 case ICE_DEV_ID_E822L_BACKPLANE: 148 case ICE_DEV_ID_E822L_SFP: 149 case ICE_DEV_ID_E822L_SGMII: 150 case ICE_DEV_ID_E823L_10G_BASE_T: 151 case ICE_DEV_ID_E823L_1GBE: 152 case ICE_DEV_ID_E823L_BACKPLANE: 153 case ICE_DEV_ID_E823L_QSFP: 154 case ICE_DEV_ID_E823L_SFP: 155 hw->mac_type = ICE_MAC_GENERIC; 156 break; 157 case ICE_DEV_ID_E830_BACKPLANE: 158 case ICE_DEV_ID_E830_QSFP56: 159 case ICE_DEV_ID_E830_SFP: 160 case ICE_DEV_ID_E830_SFP_DD: 161 hw->mac_type = ICE_MAC_E830; 162 break; 163 default: 164 hw->mac_type = ICE_MAC_UNKNOWN; 165 break; 166 } 167 168 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); 169 return 0; 170 } 171 172 /** 173 * ice_is_e810 174 * @hw: pointer to the hardware structure 175 * 176 * returns true if the device is E810 based, false if not. 177 */ 178 bool ice_is_e810(struct ice_hw *hw) 179 { 180 return hw->mac_type == ICE_MAC_E810; 181 } 182 183 /** 184 * ice_is_e810t 185 * @hw: pointer to the hardware structure 186 * 187 * returns true if the device is E810T based, false if not. 188 */ 189 bool ice_is_e810t(struct ice_hw *hw) 190 { 191 switch (hw->device_id) { 192 case ICE_DEV_ID_E810C_SFP: 193 switch (hw->subsystem_device_id) { 194 case ICE_SUBDEV_ID_E810T: 195 case ICE_SUBDEV_ID_E810T2: 196 case ICE_SUBDEV_ID_E810T3: 197 case ICE_SUBDEV_ID_E810T4: 198 case ICE_SUBDEV_ID_E810T6: 199 case ICE_SUBDEV_ID_E810T7: 200 return true; 201 } 202 break; 203 case ICE_DEV_ID_E810C_QSFP: 204 switch (hw->subsystem_device_id) { 205 case ICE_SUBDEV_ID_E810T2: 206 case ICE_SUBDEV_ID_E810T3: 207 case ICE_SUBDEV_ID_E810T5: 208 return true; 209 } 210 break; 211 default: 212 break; 213 } 214 215 return false; 216 } 217 218 /** 219 * ice_is_e823 220 * @hw: pointer to the hardware structure 221 * 222 * returns true if the device is E823-L or E823-C based, false if not. 223 */ 224 bool ice_is_e823(struct ice_hw *hw) 225 { 226 switch (hw->device_id) { 227 case ICE_DEV_ID_E823L_BACKPLANE: 228 case ICE_DEV_ID_E823L_SFP: 229 case ICE_DEV_ID_E823L_10G_BASE_T: 230 case ICE_DEV_ID_E823L_1GBE: 231 case ICE_DEV_ID_E823L_QSFP: 232 case ICE_DEV_ID_E823C_BACKPLANE: 233 case ICE_DEV_ID_E823C_QSFP: 234 case ICE_DEV_ID_E823C_SFP: 235 case ICE_DEV_ID_E823C_10G_BASE_T: 236 case ICE_DEV_ID_E823C_SGMII: 237 return true; 238 default: 239 return false; 240 } 241 } 242 243 /** 244 * ice_clear_pf_cfg - Clear PF configuration 245 * @hw: pointer to the hardware structure 246 * 247 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port 248 * configuration, flow director filters, etc.). 249 */ 250 int ice_clear_pf_cfg(struct ice_hw *hw) 251 { 252 struct ice_aq_desc desc; 253 254 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg); 255 256 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 257 } 258 259 /** 260 * ice_aq_manage_mac_read - manage MAC address read command 261 * @hw: pointer to the HW struct 262 * @buf: a virtual buffer to hold the manage MAC read response 263 * @buf_size: Size of the virtual buffer 264 * @cd: pointer to command details structure or NULL 265 * 266 * This function is used to return per PF station MAC address (0x0107). 267 * NOTE: Upon successful completion of this command, MAC address information 268 * is returned in user specified buffer. Please interpret user specified 269 * buffer as "manage_mac_read" response. 270 * Response such as various MAC addresses are stored in HW struct (port.mac) 271 * ice_discover_dev_caps is expected to be called before this function is 272 * called. 273 */ 274 static int 275 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, 276 struct ice_sq_cd *cd) 277 { 278 struct ice_aqc_manage_mac_read_resp *resp; 279 struct ice_aqc_manage_mac_read *cmd; 280 struct ice_aq_desc desc; 281 int status; 282 u16 flags; 283 u8 i; 284 285 cmd = &desc.params.mac_read; 286 287 if (buf_size < sizeof(*resp)) 288 return -EINVAL; 289 290 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read); 291 292 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 293 if (status) 294 return status; 295 296 resp = buf; 297 flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M; 298 299 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) { 300 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n"); 301 return -EIO; 302 } 303 304 /* A single port can report up to two (LAN and WoL) addresses */ 305 for (i = 0; i < cmd->num_addr; i++) 306 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { 307 ether_addr_copy(hw->port_info->mac.lan_addr, 308 resp[i].mac_addr); 309 ether_addr_copy(hw->port_info->mac.perm_addr, 310 resp[i].mac_addr); 311 break; 312 } 313 314 return 0; 315 } 316 317 /** 318 * ice_aq_get_phy_caps - returns PHY capabilities 319 * @pi: port information structure 320 * @qual_mods: report qualified modules 321 * @report_mode: report mode capabilities 322 * @pcaps: structure for PHY capabilities to be filled 323 * @cd: pointer to command details structure or NULL 324 * 325 * Returns the various PHY capabilities supported on the Port (0x0600) 326 */ 327 int 328 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, 329 struct ice_aqc_get_phy_caps_data *pcaps, 330 struct ice_sq_cd *cd) 331 { 332 struct ice_aqc_get_phy_caps *cmd; 333 u16 pcaps_size = sizeof(*pcaps); 334 struct ice_aq_desc desc; 335 const char *prefix; 336 struct ice_hw *hw; 337 int status; 338 339 cmd = &desc.params.get_phy; 340 341 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) 342 return -EINVAL; 343 hw = pi->hw; 344 345 if (report_mode == ICE_AQC_REPORT_DFLT_CFG && 346 !ice_fw_supports_report_dflt_cfg(hw)) 347 return -EINVAL; 348 349 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); 350 351 if (qual_mods) 352 cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); 353 354 cmd->param0 |= cpu_to_le16(report_mode); 355 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); 356 357 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n"); 358 359 switch (report_mode) { 360 case ICE_AQC_REPORT_TOPO_CAP_MEDIA: 361 prefix = "phy_caps_media"; 362 break; 363 case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA: 364 prefix = "phy_caps_no_media"; 365 break; 366 case ICE_AQC_REPORT_ACTIVE_CFG: 367 prefix = "phy_caps_active"; 368 break; 369 case ICE_AQC_REPORT_DFLT_CFG: 370 prefix = "phy_caps_default"; 371 break; 372 default: 373 prefix = "phy_caps_invalid"; 374 } 375 376 ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low), 377 le64_to_cpu(pcaps->phy_type_high), prefix); 378 379 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n", 380 prefix, report_mode); 381 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps); 382 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix, 383 pcaps->low_power_ctrl_an); 384 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix, 385 pcaps->eee_cap); 386 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix, 387 pcaps->eeer_value); 388 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix, 389 pcaps->link_fec_options); 390 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n", 391 prefix, pcaps->module_compliance_enforcement); 392 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n", 393 prefix, pcaps->extended_compliance_code); 394 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix, 395 pcaps->module_type[0]); 396 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix, 397 pcaps->module_type[1]); 398 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix, 399 pcaps->module_type[2]); 400 401 if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) { 402 pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); 403 pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); 404 memcpy(pi->phy.link_info.module_type, &pcaps->module_type, 405 sizeof(pi->phy.link_info.module_type)); 406 } 407 408 return status; 409 } 410 411 /** 412 * ice_aq_get_link_topo_handle - get link topology node return status 413 * @pi: port information structure 414 * @node_type: requested node type 415 * @cd: pointer to command details structure or NULL 416 * 417 * Get link topology node return status for specified node type (0x06E0) 418 * 419 * Node type cage can be used to determine if cage is present. If AQC 420 * returns error (ENOENT), then no cage present. If no cage present, then 421 * connection type is backplane or BASE-T. 422 */ 423 static int 424 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, 425 struct ice_sq_cd *cd) 426 { 427 struct ice_aqc_get_link_topo *cmd; 428 struct ice_aq_desc desc; 429 430 cmd = &desc.params.get_link_topo; 431 432 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 433 434 cmd->addr.topo_params.node_type_ctx = 435 (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << 436 ICE_AQC_LINK_TOPO_NODE_CTX_S); 437 438 /* set node type */ 439 cmd->addr.topo_params.node_type_ctx |= 440 (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); 441 442 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 443 } 444 445 /** 446 * ice_aq_get_netlist_node 447 * @hw: pointer to the hw struct 448 * @cmd: get_link_topo AQ structure 449 * @node_part_number: output node part number if node found 450 * @node_handle: output node handle parameter if node found 451 * 452 * Get netlist node handle. 453 */ 454 int 455 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, 456 u8 *node_part_number, u16 *node_handle) 457 { 458 struct ice_aq_desc desc; 459 460 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); 461 desc.params.get_link_topo = *cmd; 462 463 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL)) 464 return -EINTR; 465 466 if (node_handle) 467 *node_handle = 468 le16_to_cpu(desc.params.get_link_topo.addr.handle); 469 if (node_part_number) 470 *node_part_number = desc.params.get_link_topo.node_part_num; 471 472 return 0; 473 } 474 475 /** 476 * ice_find_netlist_node 477 * @hw: pointer to the hw struct 478 * @node_type_ctx: type of netlist node to look for 479 * @node_part_number: node part number to look for 480 * @node_handle: output parameter if node found - optional 481 * 482 * Scan the netlist for a node handle of the given node type and part number. 483 * 484 * If node_handle is non-NULL it will be modified on function exit. It is only 485 * valid if the function returns zero, and should be ignored on any non-zero 486 * return value. 487 * 488 * Returns: 0 if the node is found, -ENOENT if no handle was found, and 489 * a negative error code on failure to access the AQ. 490 */ 491 static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, 492 u8 node_part_number, u16 *node_handle) 493 { 494 u8 idx; 495 496 for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) { 497 struct ice_aqc_get_link_topo cmd = {}; 498 u8 rec_node_part_number; 499 int status; 500 501 cmd.addr.topo_params.node_type_ctx = 502 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, 503 node_type_ctx); 504 cmd.addr.topo_params.index = idx; 505 506 status = ice_aq_get_netlist_node(hw, &cmd, 507 &rec_node_part_number, 508 node_handle); 509 if (status) 510 return status; 511 512 if (rec_node_part_number == node_part_number) 513 return 0; 514 } 515 516 return -ENOENT; 517 } 518 519 /** 520 * ice_is_media_cage_present 521 * @pi: port information structure 522 * 523 * Returns true if media cage is present, else false. If no cage, then 524 * media type is backplane or BASE-T. 525 */ 526 static bool ice_is_media_cage_present(struct ice_port_info *pi) 527 { 528 /* Node type cage can be used to determine if cage is present. If AQC 529 * returns error (ENOENT), then no cage present. If no cage present then 530 * connection type is backplane or BASE-T. 531 */ 532 return !ice_aq_get_link_topo_handle(pi, 533 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, 534 NULL); 535 } 536 537 /** 538 * ice_get_media_type - Gets media type 539 * @pi: port information structure 540 */ 541 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) 542 { 543 struct ice_link_status *hw_link_info; 544 545 if (!pi) 546 return ICE_MEDIA_UNKNOWN; 547 548 hw_link_info = &pi->phy.link_info; 549 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high) 550 /* If more than one media type is selected, report unknown */ 551 return ICE_MEDIA_UNKNOWN; 552 553 if (hw_link_info->phy_type_low) { 554 /* 1G SGMII is a special case where some DA cable PHYs 555 * may show this as an option when it really shouldn't 556 * be since SGMII is meant to be between a MAC and a PHY 557 * in a backplane. Try to detect this case and handle it 558 */ 559 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII && 560 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == 561 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || 562 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == 563 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) 564 return ICE_MEDIA_DA; 565 566 switch (hw_link_info->phy_type_low) { 567 case ICE_PHY_TYPE_LOW_1000BASE_SX: 568 case ICE_PHY_TYPE_LOW_1000BASE_LX: 569 case ICE_PHY_TYPE_LOW_10GBASE_SR: 570 case ICE_PHY_TYPE_LOW_10GBASE_LR: 571 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 572 case ICE_PHY_TYPE_LOW_25GBASE_SR: 573 case ICE_PHY_TYPE_LOW_25GBASE_LR: 574 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 575 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 576 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 577 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 578 case ICE_PHY_TYPE_LOW_50GBASE_SR: 579 case ICE_PHY_TYPE_LOW_50GBASE_FR: 580 case ICE_PHY_TYPE_LOW_50GBASE_LR: 581 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 582 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 583 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 584 case ICE_PHY_TYPE_LOW_100GBASE_DR: 585 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 586 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 587 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 588 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 589 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 590 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 591 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 592 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 593 return ICE_MEDIA_FIBER; 594 case ICE_PHY_TYPE_LOW_100BASE_TX: 595 case ICE_PHY_TYPE_LOW_1000BASE_T: 596 case ICE_PHY_TYPE_LOW_2500BASE_T: 597 case ICE_PHY_TYPE_LOW_5GBASE_T: 598 case ICE_PHY_TYPE_LOW_10GBASE_T: 599 case ICE_PHY_TYPE_LOW_25GBASE_T: 600 return ICE_MEDIA_BASET; 601 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 602 case ICE_PHY_TYPE_LOW_25GBASE_CR: 603 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 604 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 605 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 606 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 607 case ICE_PHY_TYPE_LOW_50GBASE_CP: 608 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 609 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 610 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 611 return ICE_MEDIA_DA; 612 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 613 case ICE_PHY_TYPE_LOW_40G_XLAUI: 614 case ICE_PHY_TYPE_LOW_50G_LAUI2: 615 case ICE_PHY_TYPE_LOW_50G_AUI2: 616 case ICE_PHY_TYPE_LOW_50G_AUI1: 617 case ICE_PHY_TYPE_LOW_100G_AUI4: 618 case ICE_PHY_TYPE_LOW_100G_CAUI4: 619 if (ice_is_media_cage_present(pi)) 620 return ICE_MEDIA_DA; 621 fallthrough; 622 case ICE_PHY_TYPE_LOW_1000BASE_KX: 623 case ICE_PHY_TYPE_LOW_2500BASE_KX: 624 case ICE_PHY_TYPE_LOW_2500BASE_X: 625 case ICE_PHY_TYPE_LOW_5GBASE_KR: 626 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 627 case ICE_PHY_TYPE_LOW_25GBASE_KR: 628 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 629 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 630 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 631 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 632 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 633 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 634 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 635 return ICE_MEDIA_BACKPLANE; 636 } 637 } else { 638 switch (hw_link_info->phy_type_high) { 639 case ICE_PHY_TYPE_HIGH_100G_AUI2: 640 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 641 if (ice_is_media_cage_present(pi)) 642 return ICE_MEDIA_DA; 643 fallthrough; 644 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 645 return ICE_MEDIA_BACKPLANE; 646 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 647 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 648 return ICE_MEDIA_FIBER; 649 } 650 } 651 return ICE_MEDIA_UNKNOWN; 652 } 653 654 /** 655 * ice_get_link_status_datalen 656 * @hw: pointer to the HW struct 657 * 658 * Returns datalength for the Get Link Status AQ command, which is bigger for 659 * newer adapter families handled by ice driver. 660 */ 661 static u16 ice_get_link_status_datalen(struct ice_hw *hw) 662 { 663 switch (hw->mac_type) { 664 case ICE_MAC_E830: 665 return ICE_AQC_LS_DATA_SIZE_V2; 666 case ICE_MAC_E810: 667 default: 668 return ICE_AQC_LS_DATA_SIZE_V1; 669 } 670 } 671 672 /** 673 * ice_aq_get_link_info 674 * @pi: port information structure 675 * @ena_lse: enable/disable LinkStatusEvent reporting 676 * @link: pointer to link status structure - optional 677 * @cd: pointer to command details structure or NULL 678 * 679 * Get Link Status (0x607). Returns the link status of the adapter. 680 */ 681 int 682 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, 683 struct ice_link_status *link, struct ice_sq_cd *cd) 684 { 685 struct ice_aqc_get_link_status_data link_data = { 0 }; 686 struct ice_aqc_get_link_status *resp; 687 struct ice_link_status *li_old, *li; 688 enum ice_media_type *hw_media_type; 689 struct ice_fc_info *hw_fc_info; 690 bool tx_pause, rx_pause; 691 struct ice_aq_desc desc; 692 struct ice_hw *hw; 693 u16 cmd_flags; 694 int status; 695 696 if (!pi) 697 return -EINVAL; 698 hw = pi->hw; 699 li_old = &pi->phy.link_info_old; 700 hw_media_type = &pi->phy.media_type; 701 li = &pi->phy.link_info; 702 hw_fc_info = &pi->fc; 703 704 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status); 705 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS; 706 resp = &desc.params.get_link_status; 707 resp->cmd_flags = cpu_to_le16(cmd_flags); 708 resp->lport_num = pi->lport; 709 710 status = ice_aq_send_cmd(hw, &desc, &link_data, 711 ice_get_link_status_datalen(hw), cd); 712 if (status) 713 return status; 714 715 /* save off old link status information */ 716 *li_old = *li; 717 718 /* update current link status information */ 719 li->link_speed = le16_to_cpu(link_data.link_speed); 720 li->phy_type_low = le64_to_cpu(link_data.phy_type_low); 721 li->phy_type_high = le64_to_cpu(link_data.phy_type_high); 722 *hw_media_type = ice_get_media_type(pi); 723 li->link_info = link_data.link_info; 724 li->link_cfg_err = link_data.link_cfg_err; 725 li->an_info = link_data.an_info; 726 li->ext_info = link_data.ext_info; 727 li->max_frame_size = le16_to_cpu(link_data.max_frame_size); 728 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK; 729 li->topo_media_conflict = link_data.topo_media_conflict; 730 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M | 731 ICE_AQ_CFG_PACING_TYPE_M); 732 733 /* update fc info */ 734 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX); 735 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX); 736 if (tx_pause && rx_pause) 737 hw_fc_info->current_mode = ICE_FC_FULL; 738 else if (tx_pause) 739 hw_fc_info->current_mode = ICE_FC_TX_PAUSE; 740 else if (rx_pause) 741 hw_fc_info->current_mode = ICE_FC_RX_PAUSE; 742 else 743 hw_fc_info->current_mode = ICE_FC_NONE; 744 745 li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); 746 747 ice_debug(hw, ICE_DBG_LINK, "get link info\n"); 748 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed); 749 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 750 (unsigned long long)li->phy_type_low); 751 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 752 (unsigned long long)li->phy_type_high); 753 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type); 754 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info); 755 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err); 756 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info); 757 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info); 758 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info); 759 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena); 760 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n", 761 li->max_frame_size); 762 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing); 763 764 /* save link status information */ 765 if (link) 766 *link = *li; 767 768 /* flag cleared so calling functions don't call AQ again */ 769 pi->phy.get_link_info = false; 770 771 return 0; 772 } 773 774 /** 775 * ice_fill_tx_timer_and_fc_thresh 776 * @hw: pointer to the HW struct 777 * @cmd: pointer to MAC cfg structure 778 * 779 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command 780 * descriptor 781 */ 782 static void 783 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw, 784 struct ice_aqc_set_mac_cfg *cmd) 785 { 786 u32 val, fc_thres_m; 787 788 /* We read back the transmit timer and FC threshold value of 789 * LFC. Thus, we will use index = 790 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX. 791 * 792 * Also, because we are operating on transmit timer and FC 793 * threshold of LFC, we don't turn on any bit in tx_tmr_priority 794 */ 795 #define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX 796 #define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR 797 798 if (hw->mac_type == ICE_MAC_E830) { 799 /* Retrieve the transmit timer */ 800 val = rd32(hw, E830_PRTMAC_CL01_PS_QNT); 801 cmd->tx_tmr_value = 802 le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M); 803 804 /* Retrieve the fc threshold */ 805 val = rd32(hw, E830_PRTMAC_CL01_QNT_THR); 806 fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M; 807 } else { 808 /* Retrieve the transmit timer */ 809 val = rd32(hw, 810 E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC)); 811 cmd->tx_tmr_value = 812 le16_encode_bits(val, 813 E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M); 814 815 /* Retrieve the fc threshold */ 816 val = rd32(hw, 817 E800_REFRESH_TMR(E800_IDX_OF_LFC)); 818 fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M; 819 } 820 cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m); 821 } 822 823 /** 824 * ice_aq_set_mac_cfg 825 * @hw: pointer to the HW struct 826 * @max_frame_size: Maximum Frame Size to be supported 827 * @cd: pointer to command details structure or NULL 828 * 829 * Set MAC configuration (0x0603) 830 */ 831 int 832 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd) 833 { 834 struct ice_aqc_set_mac_cfg *cmd; 835 struct ice_aq_desc desc; 836 837 cmd = &desc.params.set_mac_cfg; 838 839 if (max_frame_size == 0) 840 return -EINVAL; 841 842 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg); 843 844 cmd->max_frame_size = cpu_to_le16(max_frame_size); 845 846 ice_fill_tx_timer_and_fc_thresh(hw, cmd); 847 848 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 849 } 850 851 /** 852 * ice_init_fltr_mgmt_struct - initializes filter management list and locks 853 * @hw: pointer to the HW struct 854 */ 855 static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) 856 { 857 struct ice_switch_info *sw; 858 int status; 859 860 hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw), 861 sizeof(*hw->switch_info), GFP_KERNEL); 862 sw = hw->switch_info; 863 864 if (!sw) 865 return -ENOMEM; 866 867 INIT_LIST_HEAD(&sw->vsi_list_map_head); 868 sw->prof_res_bm_init = 0; 869 870 status = ice_init_def_sw_recp(hw); 871 if (status) { 872 devm_kfree(ice_hw_to_dev(hw), hw->switch_info); 873 return status; 874 } 875 return 0; 876 } 877 878 /** 879 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks 880 * @hw: pointer to the HW struct 881 */ 882 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) 883 { 884 struct ice_switch_info *sw = hw->switch_info; 885 struct ice_vsi_list_map_info *v_pos_map; 886 struct ice_vsi_list_map_info *v_tmp_map; 887 struct ice_sw_recipe *recps; 888 u8 i; 889 890 list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, 891 list_entry) { 892 list_del(&v_pos_map->list_entry); 893 devm_kfree(ice_hw_to_dev(hw), v_pos_map); 894 } 895 recps = sw->recp_list; 896 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { 897 struct ice_recp_grp_entry *rg_entry, *tmprg_entry; 898 899 recps[i].root_rid = i; 900 list_for_each_entry_safe(rg_entry, tmprg_entry, 901 &recps[i].rg_list, l_entry) { 902 list_del(&rg_entry->l_entry); 903 devm_kfree(ice_hw_to_dev(hw), rg_entry); 904 } 905 906 if (recps[i].adv_rule) { 907 struct ice_adv_fltr_mgmt_list_entry *tmp_entry; 908 struct ice_adv_fltr_mgmt_list_entry *lst_itr; 909 910 mutex_destroy(&recps[i].filt_rule_lock); 911 list_for_each_entry_safe(lst_itr, tmp_entry, 912 &recps[i].filt_rules, 913 list_entry) { 914 list_del(&lst_itr->list_entry); 915 devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups); 916 devm_kfree(ice_hw_to_dev(hw), lst_itr); 917 } 918 } else { 919 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; 920 921 mutex_destroy(&recps[i].filt_rule_lock); 922 list_for_each_entry_safe(lst_itr, tmp_entry, 923 &recps[i].filt_rules, 924 list_entry) { 925 list_del(&lst_itr->list_entry); 926 devm_kfree(ice_hw_to_dev(hw), lst_itr); 927 } 928 } 929 devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf); 930 } 931 ice_rm_all_sw_replay_rule_info(hw); 932 devm_kfree(ice_hw_to_dev(hw), sw->recp_list); 933 devm_kfree(ice_hw_to_dev(hw), sw); 934 } 935 936 /** 937 * ice_get_fw_log_cfg - get FW logging configuration 938 * @hw: pointer to the HW struct 939 */ 940 static int ice_get_fw_log_cfg(struct ice_hw *hw) 941 { 942 struct ice_aq_desc desc; 943 __le16 *config; 944 int status; 945 u16 size; 946 947 size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; 948 config = kzalloc(size, GFP_KERNEL); 949 if (!config) 950 return -ENOMEM; 951 952 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); 953 954 status = ice_aq_send_cmd(hw, &desc, config, size, NULL); 955 if (!status) { 956 u16 i; 957 958 /* Save FW logging information into the HW structure */ 959 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { 960 u16 v, m, flgs; 961 962 v = le16_to_cpu(config[i]); 963 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; 964 flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; 965 966 if (m < ICE_AQC_FW_LOG_ID_MAX) 967 hw->fw_log.evnts[m].cur = flgs; 968 } 969 } 970 971 kfree(config); 972 973 return status; 974 } 975 976 /** 977 * ice_cfg_fw_log - configure FW logging 978 * @hw: pointer to the HW struct 979 * @enable: enable certain FW logging events if true, disable all if false 980 * 981 * This function enables/disables the FW logging via Rx CQ events and a UART 982 * port based on predetermined configurations. FW logging via the Rx CQ can be 983 * enabled/disabled for individual PF's. However, FW logging via the UART can 984 * only be enabled/disabled for all PFs on the same device. 985 * 986 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in 987 * hw->fw_log need to be set accordingly, e.g. based on user-provided input, 988 * before initializing the device. 989 * 990 * When re/configuring FW logging, callers need to update the "cfg" elements of 991 * the hw->fw_log.evnts array with the desired logging event configurations for 992 * modules of interest. When disabling FW logging completely, the callers can 993 * just pass false in the "enable" parameter. On completion, the function will 994 * update the "cur" element of the hw->fw_log.evnts array with the resulting 995 * logging event configurations of the modules that are being re/configured. FW 996 * logging modules that are not part of a reconfiguration operation retain their 997 * previous states. 998 * 999 * Before resetting the device, it is recommended that the driver disables FW 1000 * logging before shutting down the control queue. When disabling FW logging 1001 * ("enable" = false), the latest configurations of FW logging events stored in 1002 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after 1003 * a device reset. 1004 * 1005 * When enabling FW logging to emit log messages via the Rx CQ during the 1006 * device's initialization phase, a mechanism alternative to interrupt handlers 1007 * needs to be used to extract FW log messages from the Rx CQ periodically and 1008 * to prevent the Rx CQ from being full and stalling other types of control 1009 * messages from FW to SW. Interrupts are typically disabled during the device's 1010 * initialization phase. 1011 */ 1012 static int ice_cfg_fw_log(struct ice_hw *hw, bool enable) 1013 { 1014 struct ice_aqc_fw_logging *cmd; 1015 u16 i, chgs = 0, len = 0; 1016 struct ice_aq_desc desc; 1017 __le16 *data = NULL; 1018 u8 actv_evnts = 0; 1019 void *buf = NULL; 1020 int status = 0; 1021 1022 if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) 1023 return 0; 1024 1025 /* Disable FW logging only when the control queue is still responsive */ 1026 if (!enable && 1027 (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) 1028 return 0; 1029 1030 /* Get current FW log settings */ 1031 status = ice_get_fw_log_cfg(hw); 1032 if (status) 1033 return status; 1034 1035 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); 1036 cmd = &desc.params.fw_logging; 1037 1038 /* Indicate which controls are valid */ 1039 if (hw->fw_log.cq_en) 1040 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; 1041 1042 if (hw->fw_log.uart_en) 1043 cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; 1044 1045 if (enable) { 1046 /* Fill in an array of entries with FW logging modules and 1047 * logging events being reconfigured. 1048 */ 1049 for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { 1050 u16 val; 1051 1052 /* Keep track of enabled event types */ 1053 actv_evnts |= hw->fw_log.evnts[i].cfg; 1054 1055 if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) 1056 continue; 1057 1058 if (!data) { 1059 data = devm_kcalloc(ice_hw_to_dev(hw), 1060 ICE_AQC_FW_LOG_ID_MAX, 1061 sizeof(*data), 1062 GFP_KERNEL); 1063 if (!data) 1064 return -ENOMEM; 1065 } 1066 1067 val = i << ICE_AQC_FW_LOG_ID_S; 1068 val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; 1069 data[chgs++] = cpu_to_le16(val); 1070 } 1071 1072 /* Only enable FW logging if at least one module is specified. 1073 * If FW logging is currently enabled but all modules are not 1074 * enabled to emit log messages, disable FW logging altogether. 1075 */ 1076 if (actv_evnts) { 1077 /* Leave if there is effectively no change */ 1078 if (!chgs) 1079 goto out; 1080 1081 if (hw->fw_log.cq_en) 1082 cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; 1083 1084 if (hw->fw_log.uart_en) 1085 cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; 1086 1087 buf = data; 1088 len = sizeof(*data) * chgs; 1089 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1090 } 1091 } 1092 1093 status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); 1094 if (!status) { 1095 /* Update the current configuration to reflect events enabled. 1096 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW 1097 * logging mode is enabled for the device. They do not reflect 1098 * actual modules being enabled to emit log messages. So, their 1099 * values remain unchanged even when all modules are disabled. 1100 */ 1101 u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; 1102 1103 hw->fw_log.actv_evnts = actv_evnts; 1104 for (i = 0; i < cnt; i++) { 1105 u16 v, m; 1106 1107 if (!enable) { 1108 /* When disabling all FW logging events as part 1109 * of device's de-initialization, the original 1110 * configurations are retained, and can be used 1111 * to reconfigure FW logging later if the device 1112 * is re-initialized. 1113 */ 1114 hw->fw_log.evnts[i].cur = 0; 1115 continue; 1116 } 1117 1118 v = le16_to_cpu(data[i]); 1119 m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; 1120 hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; 1121 } 1122 } 1123 1124 out: 1125 devm_kfree(ice_hw_to_dev(hw), data); 1126 1127 return status; 1128 } 1129 1130 /** 1131 * ice_output_fw_log 1132 * @hw: pointer to the HW struct 1133 * @desc: pointer to the AQ message descriptor 1134 * @buf: pointer to the buffer accompanying the AQ message 1135 * 1136 * Formats a FW Log message and outputs it via the standard driver logs. 1137 */ 1138 void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) 1139 { 1140 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n"); 1141 ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf, 1142 le16_to_cpu(desc->datalen)); 1143 ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n"); 1144 } 1145 1146 /** 1147 * ice_get_itr_intrl_gran 1148 * @hw: pointer to the HW struct 1149 * 1150 * Determines the ITR/INTRL granularities based on the maximum aggregate 1151 * bandwidth according to the device's configuration during power-on. 1152 */ 1153 static void ice_get_itr_intrl_gran(struct ice_hw *hw) 1154 { 1155 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) & 1156 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >> 1157 GL_PWR_MODE_CTL_CAR_MAX_BW_S; 1158 1159 switch (max_agg_bw) { 1160 case ICE_MAX_AGG_BW_200G: 1161 case ICE_MAX_AGG_BW_100G: 1162 case ICE_MAX_AGG_BW_50G: 1163 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25; 1164 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25; 1165 break; 1166 case ICE_MAX_AGG_BW_25G: 1167 hw->itr_gran = ICE_ITR_GRAN_MAX_25; 1168 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25; 1169 break; 1170 } 1171 } 1172 1173 /** 1174 * ice_init_hw - main hardware initialization routine 1175 * @hw: pointer to the hardware structure 1176 */ 1177 int ice_init_hw(struct ice_hw *hw) 1178 { 1179 struct ice_aqc_get_phy_caps_data *pcaps; 1180 u16 mac_buf_len; 1181 void *mac_buf; 1182 int status; 1183 1184 /* Set MAC type based on DeviceID */ 1185 status = ice_set_mac_type(hw); 1186 if (status) 1187 return status; 1188 1189 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) & 1190 PF_FUNC_RID_FUNC_NUM_M) >> 1191 PF_FUNC_RID_FUNC_NUM_S; 1192 1193 status = ice_reset(hw, ICE_RESET_PFR); 1194 if (status) 1195 return status; 1196 1197 ice_get_itr_intrl_gran(hw); 1198 1199 status = ice_create_all_ctrlq(hw); 1200 if (status) 1201 goto err_unroll_cqinit; 1202 1203 /* Enable FW logging. Not fatal if this fails. */ 1204 status = ice_cfg_fw_log(hw, true); 1205 if (status) 1206 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); 1207 1208 status = ice_clear_pf_cfg(hw); 1209 if (status) 1210 goto err_unroll_cqinit; 1211 1212 /* Set bit to enable Flow Director filters */ 1213 wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M); 1214 INIT_LIST_HEAD(&hw->fdir_list_head); 1215 1216 ice_clear_pxe_mode(hw); 1217 1218 status = ice_init_nvm(hw); 1219 if (status) 1220 goto err_unroll_cqinit; 1221 1222 status = ice_get_caps(hw); 1223 if (status) 1224 goto err_unroll_cqinit; 1225 1226 if (!hw->port_info) 1227 hw->port_info = devm_kzalloc(ice_hw_to_dev(hw), 1228 sizeof(*hw->port_info), 1229 GFP_KERNEL); 1230 if (!hw->port_info) { 1231 status = -ENOMEM; 1232 goto err_unroll_cqinit; 1233 } 1234 1235 /* set the back pointer to HW */ 1236 hw->port_info->hw = hw; 1237 1238 /* Initialize port_info struct with switch configuration data */ 1239 status = ice_get_initial_sw_cfg(hw); 1240 if (status) 1241 goto err_unroll_alloc; 1242 1243 hw->evb_veb = true; 1244 1245 /* init xarray for identifying scheduling nodes uniquely */ 1246 xa_init_flags(&hw->port_info->sched_node_ids, XA_FLAGS_ALLOC); 1247 1248 /* Query the allocated resources for Tx scheduler */ 1249 status = ice_sched_query_res_alloc(hw); 1250 if (status) { 1251 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n"); 1252 goto err_unroll_alloc; 1253 } 1254 ice_sched_get_psm_clk_freq(hw); 1255 1256 /* Initialize port_info struct with scheduler data */ 1257 status = ice_sched_init_port(hw->port_info); 1258 if (status) 1259 goto err_unroll_sched; 1260 1261 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 1262 if (!pcaps) { 1263 status = -ENOMEM; 1264 goto err_unroll_sched; 1265 } 1266 1267 /* Initialize port_info struct with PHY capabilities */ 1268 status = ice_aq_get_phy_caps(hw->port_info, false, 1269 ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, 1270 NULL); 1271 devm_kfree(ice_hw_to_dev(hw), pcaps); 1272 if (status) 1273 dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n", 1274 status); 1275 1276 /* Initialize port_info struct with link information */ 1277 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL); 1278 if (status) 1279 goto err_unroll_sched; 1280 1281 /* need a valid SW entry point to build a Tx tree */ 1282 if (!hw->sw_entry_point_layer) { 1283 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); 1284 status = -EIO; 1285 goto err_unroll_sched; 1286 } 1287 INIT_LIST_HEAD(&hw->agg_list); 1288 /* Initialize max burst size */ 1289 if (!hw->max_burst_size) 1290 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE); 1291 1292 status = ice_init_fltr_mgmt_struct(hw); 1293 if (status) 1294 goto err_unroll_sched; 1295 1296 /* Get MAC information */ 1297 /* A single port can report up to two (LAN and WoL) addresses */ 1298 mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, 1299 sizeof(struct ice_aqc_manage_mac_read_resp), 1300 GFP_KERNEL); 1301 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); 1302 1303 if (!mac_buf) { 1304 status = -ENOMEM; 1305 goto err_unroll_fltr_mgmt_struct; 1306 } 1307 1308 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); 1309 devm_kfree(ice_hw_to_dev(hw), mac_buf); 1310 1311 if (status) 1312 goto err_unroll_fltr_mgmt_struct; 1313 /* enable jumbo frame support at MAC level */ 1314 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL); 1315 if (status) 1316 goto err_unroll_fltr_mgmt_struct; 1317 /* Obtain counter base index which would be used by flow director */ 1318 status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base); 1319 if (status) 1320 goto err_unroll_fltr_mgmt_struct; 1321 status = ice_init_hw_tbls(hw); 1322 if (status) 1323 goto err_unroll_fltr_mgmt_struct; 1324 mutex_init(&hw->tnl_lock); 1325 return 0; 1326 1327 err_unroll_fltr_mgmt_struct: 1328 ice_cleanup_fltr_mgmt_struct(hw); 1329 err_unroll_sched: 1330 ice_sched_cleanup_all(hw); 1331 err_unroll_alloc: 1332 devm_kfree(ice_hw_to_dev(hw), hw->port_info); 1333 err_unroll_cqinit: 1334 ice_destroy_all_ctrlq(hw); 1335 return status; 1336 } 1337 1338 /** 1339 * ice_deinit_hw - unroll initialization operations done by ice_init_hw 1340 * @hw: pointer to the hardware structure 1341 * 1342 * This should be called only during nominal operation, not as a result of 1343 * ice_init_hw() failing since ice_init_hw() will take care of unrolling 1344 * applicable initializations if it fails for any reason. 1345 */ 1346 void ice_deinit_hw(struct ice_hw *hw) 1347 { 1348 ice_free_fd_res_cntr(hw, hw->fd_ctr_base); 1349 ice_cleanup_fltr_mgmt_struct(hw); 1350 1351 ice_sched_cleanup_all(hw); 1352 ice_sched_clear_agg(hw); 1353 ice_free_seg(hw); 1354 ice_free_hw_tbls(hw); 1355 mutex_destroy(&hw->tnl_lock); 1356 1357 /* Attempt to disable FW logging before shutting down control queues */ 1358 ice_cfg_fw_log(hw, false); 1359 ice_destroy_all_ctrlq(hw); 1360 1361 /* Clear VSI contexts if not already cleared */ 1362 ice_clear_all_vsi_ctx(hw); 1363 } 1364 1365 /** 1366 * ice_check_reset - Check to see if a global reset is complete 1367 * @hw: pointer to the hardware structure 1368 */ 1369 int ice_check_reset(struct ice_hw *hw) 1370 { 1371 u32 cnt, reg = 0, grst_timeout, uld_mask; 1372 1373 /* Poll for Device Active state in case a recent CORER, GLOBR, 1374 * or EMPR has occurred. The grst delay value is in 100ms units. 1375 * Add 1sec for outstanding AQ commands that can take a long time. 1376 */ 1377 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> 1378 GLGEN_RSTCTL_GRSTDEL_S) + 10; 1379 1380 for (cnt = 0; cnt < grst_timeout; cnt++) { 1381 mdelay(100); 1382 reg = rd32(hw, GLGEN_RSTAT); 1383 if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) 1384 break; 1385 } 1386 1387 if (cnt == grst_timeout) { 1388 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); 1389 return -EIO; 1390 } 1391 1392 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\ 1393 GLNVM_ULD_PCIER_DONE_1_M |\ 1394 GLNVM_ULD_CORER_DONE_M |\ 1395 GLNVM_ULD_GLOBR_DONE_M |\ 1396 GLNVM_ULD_POR_DONE_M |\ 1397 GLNVM_ULD_POR_DONE_1_M |\ 1398 GLNVM_ULD_PCIER_DONE_2_M) 1399 1400 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ? 1401 GLNVM_ULD_PE_DONE_M : 0); 1402 1403 /* Device is Active; check Global Reset processes are done */ 1404 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) { 1405 reg = rd32(hw, GLNVM_ULD) & uld_mask; 1406 if (reg == uld_mask) { 1407 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt); 1408 break; 1409 } 1410 mdelay(10); 1411 } 1412 1413 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 1414 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n", 1415 reg); 1416 return -EIO; 1417 } 1418 1419 return 0; 1420 } 1421 1422 /** 1423 * ice_pf_reset - Reset the PF 1424 * @hw: pointer to the hardware structure 1425 * 1426 * If a global reset has been triggered, this function checks 1427 * for its completion and then issues the PF reset 1428 */ 1429 static int ice_pf_reset(struct ice_hw *hw) 1430 { 1431 u32 cnt, reg; 1432 1433 /* If at function entry a global reset was already in progress, i.e. 1434 * state is not 'device active' or any of the reset done bits are not 1435 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the 1436 * global reset is done. 1437 */ 1438 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) || 1439 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) { 1440 /* poll on global reset currently in progress until done */ 1441 if (ice_check_reset(hw)) 1442 return -EIO; 1443 1444 return 0; 1445 } 1446 1447 /* Reset the PF */ 1448 reg = rd32(hw, PFGEN_CTRL); 1449 1450 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M)); 1451 1452 /* Wait for the PFR to complete. The wait time is the global config lock 1453 * timeout plus the PFR timeout which will account for a possible reset 1454 * that is occurring during a download package operation. 1455 */ 1456 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT + 1457 ICE_PF_RESET_WAIT_COUNT; cnt++) { 1458 reg = rd32(hw, PFGEN_CTRL); 1459 if (!(reg & PFGEN_CTRL_PFSWR_M)) 1460 break; 1461 1462 mdelay(1); 1463 } 1464 1465 if (cnt == ICE_PF_RESET_WAIT_COUNT) { 1466 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n"); 1467 return -EIO; 1468 } 1469 1470 return 0; 1471 } 1472 1473 /** 1474 * ice_reset - Perform different types of reset 1475 * @hw: pointer to the hardware structure 1476 * @req: reset request 1477 * 1478 * This function triggers a reset as specified by the req parameter. 1479 * 1480 * Note: 1481 * If anything other than a PF reset is triggered, PXE mode is restored. 1482 * This has to be cleared using ice_clear_pxe_mode again, once the AQ 1483 * interface has been restored in the rebuild flow. 1484 */ 1485 int ice_reset(struct ice_hw *hw, enum ice_reset_req req) 1486 { 1487 u32 val = 0; 1488 1489 switch (req) { 1490 case ICE_RESET_PFR: 1491 return ice_pf_reset(hw); 1492 case ICE_RESET_CORER: 1493 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n"); 1494 val = GLGEN_RTRIG_CORER_M; 1495 break; 1496 case ICE_RESET_GLOBR: 1497 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); 1498 val = GLGEN_RTRIG_GLOBR_M; 1499 break; 1500 default: 1501 return -EINVAL; 1502 } 1503 1504 val |= rd32(hw, GLGEN_RTRIG); 1505 wr32(hw, GLGEN_RTRIG, val); 1506 ice_flush(hw); 1507 1508 /* wait for the FW to be ready */ 1509 return ice_check_reset(hw); 1510 } 1511 1512 /** 1513 * ice_copy_rxq_ctx_to_hw 1514 * @hw: pointer to the hardware structure 1515 * @ice_rxq_ctx: pointer to the rxq context 1516 * @rxq_index: the index of the Rx queue 1517 * 1518 * Copies rxq context from dense structure to HW register space 1519 */ 1520 static int 1521 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) 1522 { 1523 u8 i; 1524 1525 if (!ice_rxq_ctx) 1526 return -EINVAL; 1527 1528 if (rxq_index > QRX_CTRL_MAX_INDEX) 1529 return -EINVAL; 1530 1531 /* Copy each dword separately to HW */ 1532 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { 1533 wr32(hw, QRX_CONTEXT(i, rxq_index), 1534 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); 1535 1536 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, 1537 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); 1538 } 1539 1540 return 0; 1541 } 1542 1543 /* LAN Rx Queue Context */ 1544 static const struct ice_ctx_ele ice_rlan_ctx_info[] = { 1545 /* Field Width LSB */ 1546 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), 1547 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), 1548 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), 1549 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89), 1550 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102), 1551 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109), 1552 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114), 1553 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116), 1554 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117), 1555 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119), 1556 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120), 1557 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124), 1558 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127), 1559 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174), 1560 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193), 1561 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194), 1562 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195), 1563 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), 1564 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), 1565 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), 1566 { 0 } 1567 }; 1568 1569 /** 1570 * ice_write_rxq_ctx 1571 * @hw: pointer to the hardware structure 1572 * @rlan_ctx: pointer to the rxq context 1573 * @rxq_index: the index of the Rx queue 1574 * 1575 * Converts rxq context from sparse to dense structure and then writes 1576 * it to HW register space and enables the hardware to prefetch descriptors 1577 * instead of only fetching them on demand 1578 */ 1579 int 1580 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, 1581 u32 rxq_index) 1582 { 1583 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; 1584 1585 if (!rlan_ctx) 1586 return -EINVAL; 1587 1588 rlan_ctx->prefena = 1; 1589 1590 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); 1591 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); 1592 } 1593 1594 /* LAN Tx Queue Context */ 1595 const struct ice_ctx_ele ice_tlan_ctx_info[] = { 1596 /* Field Width LSB */ 1597 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), 1598 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), 1599 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60), 1600 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65), 1601 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68), 1602 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78), 1603 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80), 1604 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90), 1605 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91), 1606 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92), 1607 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93), 1608 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101), 1609 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102), 1610 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103), 1611 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104), 1612 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105), 1613 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114), 1614 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128), 1615 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129), 1616 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135), 1617 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148), 1618 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152), 1619 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153), 1620 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164), 1621 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), 1622 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), 1623 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), 1624 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), 1625 { 0 } 1626 }; 1627 1628 /* Sideband Queue command wrappers */ 1629 1630 /** 1631 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue 1632 * @hw: pointer to the HW struct 1633 * @desc: descriptor describing the command 1634 * @buf: buffer to use for indirect commands (NULL for direct commands) 1635 * @buf_size: size of buffer for indirect commands (0 for direct commands) 1636 * @cd: pointer to command details structure 1637 */ 1638 static int 1639 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, 1640 void *buf, u16 buf_size, struct ice_sq_cd *cd) 1641 { 1642 return ice_sq_send_cmd(hw, ice_get_sbq(hw), 1643 (struct ice_aq_desc *)desc, buf, buf_size, cd); 1644 } 1645 1646 /** 1647 * ice_sbq_rw_reg - Fill Sideband Queue command 1648 * @hw: pointer to the HW struct 1649 * @in: message info to be filled in descriptor 1650 */ 1651 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) 1652 { 1653 struct ice_sbq_cmd_desc desc = {0}; 1654 struct ice_sbq_msg_req msg = {0}; 1655 u16 msg_len; 1656 int status; 1657 1658 msg_len = sizeof(msg); 1659 1660 msg.dest_dev = in->dest_dev; 1661 msg.opcode = in->opcode; 1662 msg.flags = ICE_SBQ_MSG_FLAGS; 1663 msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE; 1664 msg.msg_addr_low = cpu_to_le16(in->msg_addr_low); 1665 msg.msg_addr_high = cpu_to_le32(in->msg_addr_high); 1666 1667 if (in->opcode) 1668 msg.data = cpu_to_le32(in->data); 1669 else 1670 /* data read comes back in completion, so shorten the struct by 1671 * sizeof(msg.data) 1672 */ 1673 msg_len -= sizeof(msg.data); 1674 1675 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 1676 desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req); 1677 desc.param0.cmd_len = cpu_to_le16(msg_len); 1678 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL); 1679 if (!status && !in->opcode) 1680 in->data = le32_to_cpu 1681 (((struct ice_sbq_msg_cmpl *)&msg)->data); 1682 return status; 1683 } 1684 1685 /* FW Admin Queue command wrappers */ 1686 1687 /* Software lock/mutex that is meant to be held while the Global Config Lock 1688 * in firmware is acquired by the software to prevent most (but not all) types 1689 * of AQ commands from being sent to FW 1690 */ 1691 DEFINE_MUTEX(ice_global_cfg_lock_sw); 1692 1693 /** 1694 * ice_should_retry_sq_send_cmd 1695 * @opcode: AQ opcode 1696 * 1697 * Decide if we should retry the send command routine for the ATQ, depending 1698 * on the opcode. 1699 */ 1700 static bool ice_should_retry_sq_send_cmd(u16 opcode) 1701 { 1702 switch (opcode) { 1703 case ice_aqc_opc_get_link_topo: 1704 case ice_aqc_opc_lldp_stop: 1705 case ice_aqc_opc_lldp_start: 1706 case ice_aqc_opc_lldp_filter_ctrl: 1707 return true; 1708 } 1709 1710 return false; 1711 } 1712 1713 /** 1714 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) 1715 * @hw: pointer to the HW struct 1716 * @cq: pointer to the specific Control queue 1717 * @desc: prefilled descriptor describing the command 1718 * @buf: buffer to use for indirect commands (or NULL for direct commands) 1719 * @buf_size: size of buffer for indirect commands (or 0 for direct commands) 1720 * @cd: pointer to command details structure 1721 * 1722 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin 1723 * Queue if the EBUSY AQ error is returned. 1724 */ 1725 static int 1726 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, 1727 struct ice_aq_desc *desc, void *buf, u16 buf_size, 1728 struct ice_sq_cd *cd) 1729 { 1730 struct ice_aq_desc desc_cpy; 1731 bool is_cmd_for_retry; 1732 u8 idx = 0; 1733 u16 opcode; 1734 int status; 1735 1736 opcode = le16_to_cpu(desc->opcode); 1737 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); 1738 memset(&desc_cpy, 0, sizeof(desc_cpy)); 1739 1740 if (is_cmd_for_retry) { 1741 /* All retryable cmds are direct, without buf. */ 1742 WARN_ON(buf); 1743 1744 memcpy(&desc_cpy, desc, sizeof(desc_cpy)); 1745 } 1746 1747 do { 1748 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); 1749 1750 if (!is_cmd_for_retry || !status || 1751 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) 1752 break; 1753 1754 memcpy(desc, &desc_cpy, sizeof(desc_cpy)); 1755 1756 msleep(ICE_SQ_SEND_DELAY_TIME_MS); 1757 1758 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); 1759 1760 return status; 1761 } 1762 1763 /** 1764 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue 1765 * @hw: pointer to the HW struct 1766 * @desc: descriptor describing the command 1767 * @buf: buffer to use for indirect commands (NULL for direct commands) 1768 * @buf_size: size of buffer for indirect commands (0 for direct commands) 1769 * @cd: pointer to command details structure 1770 * 1771 * Helper function to send FW Admin Queue commands to the FW Admin Queue. 1772 */ 1773 int 1774 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, 1775 u16 buf_size, struct ice_sq_cd *cd) 1776 { 1777 struct ice_aqc_req_res *cmd = &desc->params.res_owner; 1778 bool lock_acquired = false; 1779 int status; 1780 1781 /* When a package download is in process (i.e. when the firmware's 1782 * Global Configuration Lock resource is held), only the Download 1783 * Package, Get Version, Get Package Info List, Upload Section, 1784 * Update Package, Set Port Parameters, Get/Set VLAN Mode Parameters, 1785 * Add Recipe, Set Recipes to Profile Association, Get Recipe, and Get 1786 * Recipes to Profile Association, and Release Resource (with resource 1787 * ID set to Global Config Lock) AdminQ commands are allowed; all others 1788 * must block until the package download completes and the Global Config 1789 * Lock is released. See also ice_acquire_global_cfg_lock(). 1790 */ 1791 switch (le16_to_cpu(desc->opcode)) { 1792 case ice_aqc_opc_download_pkg: 1793 case ice_aqc_opc_get_pkg_info_list: 1794 case ice_aqc_opc_get_ver: 1795 case ice_aqc_opc_upload_section: 1796 case ice_aqc_opc_update_pkg: 1797 case ice_aqc_opc_set_port_params: 1798 case ice_aqc_opc_get_vlan_mode_parameters: 1799 case ice_aqc_opc_set_vlan_mode_parameters: 1800 case ice_aqc_opc_add_recipe: 1801 case ice_aqc_opc_recipe_to_profile: 1802 case ice_aqc_opc_get_recipe: 1803 case ice_aqc_opc_get_recipe_to_profile: 1804 break; 1805 case ice_aqc_opc_release_res: 1806 if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK) 1807 break; 1808 fallthrough; 1809 default: 1810 mutex_lock(&ice_global_cfg_lock_sw); 1811 lock_acquired = true; 1812 break; 1813 } 1814 1815 status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); 1816 if (lock_acquired) 1817 mutex_unlock(&ice_global_cfg_lock_sw); 1818 1819 return status; 1820 } 1821 1822 /** 1823 * ice_aq_get_fw_ver 1824 * @hw: pointer to the HW struct 1825 * @cd: pointer to command details structure or NULL 1826 * 1827 * Get the firmware version (0x0001) from the admin queue commands 1828 */ 1829 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) 1830 { 1831 struct ice_aqc_get_ver *resp; 1832 struct ice_aq_desc desc; 1833 int status; 1834 1835 resp = &desc.params.get_ver; 1836 1837 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); 1838 1839 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1840 1841 if (!status) { 1842 hw->fw_branch = resp->fw_branch; 1843 hw->fw_maj_ver = resp->fw_major; 1844 hw->fw_min_ver = resp->fw_minor; 1845 hw->fw_patch = resp->fw_patch; 1846 hw->fw_build = le32_to_cpu(resp->fw_build); 1847 hw->api_branch = resp->api_branch; 1848 hw->api_maj_ver = resp->api_major; 1849 hw->api_min_ver = resp->api_minor; 1850 hw->api_patch = resp->api_patch; 1851 } 1852 1853 return status; 1854 } 1855 1856 /** 1857 * ice_aq_send_driver_ver 1858 * @hw: pointer to the HW struct 1859 * @dv: driver's major, minor version 1860 * @cd: pointer to command details structure or NULL 1861 * 1862 * Send the driver version (0x0002) to the firmware 1863 */ 1864 int 1865 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, 1866 struct ice_sq_cd *cd) 1867 { 1868 struct ice_aqc_driver_ver *cmd; 1869 struct ice_aq_desc desc; 1870 u16 len; 1871 1872 cmd = &desc.params.driver_ver; 1873 1874 if (!dv) 1875 return -EINVAL; 1876 1877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); 1878 1879 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 1880 cmd->major_ver = dv->major_ver; 1881 cmd->minor_ver = dv->minor_ver; 1882 cmd->build_ver = dv->build_ver; 1883 cmd->subbuild_ver = dv->subbuild_ver; 1884 1885 len = 0; 1886 while (len < sizeof(dv->driver_string) && 1887 isascii(dv->driver_string[len]) && dv->driver_string[len]) 1888 len++; 1889 1890 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); 1891 } 1892 1893 /** 1894 * ice_aq_q_shutdown 1895 * @hw: pointer to the HW struct 1896 * @unloading: is the driver unloading itself 1897 * 1898 * Tell the Firmware that we're shutting down the AdminQ and whether 1899 * or not the driver is unloading as well (0x0003). 1900 */ 1901 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1902 { 1903 struct ice_aqc_q_shutdown *cmd; 1904 struct ice_aq_desc desc; 1905 1906 cmd = &desc.params.q_shutdown; 1907 1908 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1909 1910 if (unloading) 1911 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1912 1913 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1914 } 1915 1916 /** 1917 * ice_aq_req_res 1918 * @hw: pointer to the HW struct 1919 * @res: resource ID 1920 * @access: access type 1921 * @sdp_number: resource number 1922 * @timeout: the maximum time in ms that the driver may hold the resource 1923 * @cd: pointer to command details structure or NULL 1924 * 1925 * Requests common resource using the admin queue commands (0x0008). 1926 * When attempting to acquire the Global Config Lock, the driver can 1927 * learn of three states: 1928 * 1) 0 - acquired lock, and can perform download package 1929 * 2) -EIO - did not get lock, driver should fail to load 1930 * 3) -EALREADY - did not get lock, but another driver has 1931 * successfully downloaded the package; the driver does 1932 * not have to download the package and can continue 1933 * loading 1934 * 1935 * Note that if the caller is in an acquire lock, perform action, release lock 1936 * phase of operation, it is possible that the FW may detect a timeout and issue 1937 * a CORER. In this case, the driver will receive a CORER interrupt and will 1938 * have to determine its cause. The calling thread that is handling this flow 1939 * will likely get an error propagated back to it indicating the Download 1940 * Package, Update Package or the Release Resource AQ commands timed out. 1941 */ 1942 static int 1943 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1944 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1945 struct ice_sq_cd *cd) 1946 { 1947 struct ice_aqc_req_res *cmd_resp; 1948 struct ice_aq_desc desc; 1949 int status; 1950 1951 cmd_resp = &desc.params.res_owner; 1952 1953 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1954 1955 cmd_resp->res_id = cpu_to_le16(res); 1956 cmd_resp->access_type = cpu_to_le16(access); 1957 cmd_resp->res_number = cpu_to_le32(sdp_number); 1958 cmd_resp->timeout = cpu_to_le32(*timeout); 1959 *timeout = 0; 1960 1961 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1962 1963 /* The completion specifies the maximum time in ms that the driver 1964 * may hold the resource in the Timeout field. 1965 */ 1966 1967 /* Global config lock response utilizes an additional status field. 1968 * 1969 * If the Global config lock resource is held by some other driver, the 1970 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1971 * and the timeout field indicates the maximum time the current owner 1972 * of the resource has to free it. 1973 */ 1974 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1975 if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1976 *timeout = le32_to_cpu(cmd_resp->timeout); 1977 return 0; 1978 } else if (le16_to_cpu(cmd_resp->status) == 1979 ICE_AQ_RES_GLBL_IN_PROG) { 1980 *timeout = le32_to_cpu(cmd_resp->timeout); 1981 return -EIO; 1982 } else if (le16_to_cpu(cmd_resp->status) == 1983 ICE_AQ_RES_GLBL_DONE) { 1984 return -EALREADY; 1985 } 1986 1987 /* invalid FW response, force a timeout immediately */ 1988 *timeout = 0; 1989 return -EIO; 1990 } 1991 1992 /* If the resource is held by some other driver, the command completes 1993 * with a busy return value and the timeout field indicates the maximum 1994 * time the current owner of the resource has to free it. 1995 */ 1996 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1997 *timeout = le32_to_cpu(cmd_resp->timeout); 1998 1999 return status; 2000 } 2001 2002 /** 2003 * ice_aq_release_res 2004 * @hw: pointer to the HW struct 2005 * @res: resource ID 2006 * @sdp_number: resource number 2007 * @cd: pointer to command details structure or NULL 2008 * 2009 * release common resource using the admin queue commands (0x0009) 2010 */ 2011 static int 2012 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 2013 struct ice_sq_cd *cd) 2014 { 2015 struct ice_aqc_req_res *cmd; 2016 struct ice_aq_desc desc; 2017 2018 cmd = &desc.params.res_owner; 2019 2020 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 2021 2022 cmd->res_id = cpu_to_le16(res); 2023 cmd->res_number = cpu_to_le32(sdp_number); 2024 2025 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2026 } 2027 2028 /** 2029 * ice_acquire_res 2030 * @hw: pointer to the HW structure 2031 * @res: resource ID 2032 * @access: access type (read or write) 2033 * @timeout: timeout in milliseconds 2034 * 2035 * This function will attempt to acquire the ownership of a resource. 2036 */ 2037 int 2038 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 2039 enum ice_aq_res_access_type access, u32 timeout) 2040 { 2041 #define ICE_RES_POLLING_DELAY_MS 10 2042 u32 delay = ICE_RES_POLLING_DELAY_MS; 2043 u32 time_left = timeout; 2044 int status; 2045 2046 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2047 2048 /* A return code of -EALREADY means that another driver has 2049 * previously acquired the resource and performed any necessary updates; 2050 * in this case the caller does not obtain the resource and has no 2051 * further work to do. 2052 */ 2053 if (status == -EALREADY) 2054 goto ice_acquire_res_exit; 2055 2056 if (status) 2057 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 2058 2059 /* If necessary, poll until the current lock owner timeouts */ 2060 timeout = time_left; 2061 while (status && timeout && time_left) { 2062 mdelay(delay); 2063 timeout = (timeout > delay) ? timeout - delay : 0; 2064 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2065 2066 if (status == -EALREADY) 2067 /* lock free, but no work to do */ 2068 break; 2069 2070 if (!status) 2071 /* lock acquired */ 2072 break; 2073 } 2074 if (status && status != -EALREADY) 2075 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 2076 2077 ice_acquire_res_exit: 2078 if (status == -EALREADY) { 2079 if (access == ICE_RES_WRITE) 2080 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 2081 else 2082 ice_debug(hw, ICE_DBG_RES, "Warning: -EALREADY not expected\n"); 2083 } 2084 return status; 2085 } 2086 2087 /** 2088 * ice_release_res 2089 * @hw: pointer to the HW structure 2090 * @res: resource ID 2091 * 2092 * This function will release a resource using the proper Admin Command. 2093 */ 2094 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2095 { 2096 unsigned long timeout; 2097 int status; 2098 2099 /* there are some rare cases when trying to release the resource 2100 * results in an admin queue timeout, so handle them correctly 2101 */ 2102 timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; 2103 do { 2104 status = ice_aq_release_res(hw, res, 0, NULL); 2105 if (status != -EIO) 2106 break; 2107 usleep_range(1000, 2000); 2108 } while (time_before(jiffies, timeout)); 2109 } 2110 2111 /** 2112 * ice_aq_alloc_free_res - command to allocate/free resources 2113 * @hw: pointer to the HW struct 2114 * @buf: Indirect buffer to hold data parameters and response 2115 * @buf_size: size of buffer for indirect commands 2116 * @opc: pass in the command opcode 2117 * 2118 * Helper function to allocate/free resources using the admin queue commands 2119 */ 2120 int ice_aq_alloc_free_res(struct ice_hw *hw, 2121 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2122 enum ice_adminq_opc opc) 2123 { 2124 struct ice_aqc_alloc_free_res_cmd *cmd; 2125 struct ice_aq_desc desc; 2126 2127 cmd = &desc.params.sw_res_ctrl; 2128 2129 if (!buf || buf_size < flex_array_size(buf, elem, 1)) 2130 return -EINVAL; 2131 2132 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2133 2134 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2135 2136 cmd->num_entries = cpu_to_le16(1); 2137 2138 return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL); 2139 } 2140 2141 /** 2142 * ice_alloc_hw_res - allocate resource 2143 * @hw: pointer to the HW struct 2144 * @type: type of resource 2145 * @num: number of resources to allocate 2146 * @btm: allocate from bottom 2147 * @res: pointer to array that will receive the resources 2148 */ 2149 int 2150 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2151 { 2152 struct ice_aqc_alloc_free_res_elem *buf; 2153 u16 buf_len; 2154 int status; 2155 2156 buf_len = struct_size(buf, elem, num); 2157 buf = kzalloc(buf_len, GFP_KERNEL); 2158 if (!buf) 2159 return -ENOMEM; 2160 2161 /* Prepare buffer to allocate resource. */ 2162 buf->num_elems = cpu_to_le16(num); 2163 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2164 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2165 if (btm) 2166 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2167 2168 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res); 2169 if (status) 2170 goto ice_alloc_res_exit; 2171 2172 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 2173 2174 ice_alloc_res_exit: 2175 kfree(buf); 2176 return status; 2177 } 2178 2179 /** 2180 * ice_free_hw_res - free allocated HW resource 2181 * @hw: pointer to the HW struct 2182 * @type: type of resource to free 2183 * @num: number of resources 2184 * @res: pointer to array that contains the resources to free 2185 */ 2186 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2187 { 2188 struct ice_aqc_alloc_free_res_elem *buf; 2189 u16 buf_len; 2190 int status; 2191 2192 buf_len = struct_size(buf, elem, num); 2193 buf = kzalloc(buf_len, GFP_KERNEL); 2194 if (!buf) 2195 return -ENOMEM; 2196 2197 /* Prepare buffer to free resource. */ 2198 buf->num_elems = cpu_to_le16(num); 2199 buf->res_type = cpu_to_le16(type); 2200 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 2201 2202 status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res); 2203 if (status) 2204 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2205 2206 kfree(buf); 2207 return status; 2208 } 2209 2210 /** 2211 * ice_get_num_per_func - determine number of resources per PF 2212 * @hw: pointer to the HW structure 2213 * @max: value to be evenly split between each PF 2214 * 2215 * Determine the number of valid functions by going through the bitmap returned 2216 * from parsing capabilities and use this to calculate the number of resources 2217 * per PF based on the max value passed in. 2218 */ 2219 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2220 { 2221 u8 funcs; 2222 2223 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2224 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 2225 ICE_CAPS_VALID_FUNCS_M); 2226 2227 if (!funcs) 2228 return 0; 2229 2230 return max / funcs; 2231 } 2232 2233 /** 2234 * ice_parse_common_caps - parse common device/function capabilities 2235 * @hw: pointer to the HW struct 2236 * @caps: pointer to common capabilities structure 2237 * @elem: the capability element to parse 2238 * @prefix: message prefix for tracing capabilities 2239 * 2240 * Given a capability element, extract relevant details into the common 2241 * capability structure. 2242 * 2243 * Returns: true if the capability matches one of the common capability ids, 2244 * false otherwise. 2245 */ 2246 static bool 2247 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2248 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2249 { 2250 u32 logical_id = le32_to_cpu(elem->logical_id); 2251 u32 phys_id = le32_to_cpu(elem->phys_id); 2252 u32 number = le32_to_cpu(elem->number); 2253 u16 cap = le16_to_cpu(elem->cap); 2254 bool found = true; 2255 2256 switch (cap) { 2257 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2258 caps->valid_functions = number; 2259 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2260 caps->valid_functions); 2261 break; 2262 case ICE_AQC_CAPS_SRIOV: 2263 caps->sr_iov_1_1 = (number == 1); 2264 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2265 caps->sr_iov_1_1); 2266 break; 2267 case ICE_AQC_CAPS_DCB: 2268 caps->dcb = (number == 1); 2269 caps->active_tc_bitmap = logical_id; 2270 caps->maxtc = phys_id; 2271 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2272 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2273 caps->active_tc_bitmap); 2274 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2275 break; 2276 case ICE_AQC_CAPS_RSS: 2277 caps->rss_table_size = number; 2278 caps->rss_table_entry_width = logical_id; 2279 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2280 caps->rss_table_size); 2281 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2282 caps->rss_table_entry_width); 2283 break; 2284 case ICE_AQC_CAPS_RXQS: 2285 caps->num_rxq = number; 2286 caps->rxq_first_id = phys_id; 2287 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2288 caps->num_rxq); 2289 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2290 caps->rxq_first_id); 2291 break; 2292 case ICE_AQC_CAPS_TXQS: 2293 caps->num_txq = number; 2294 caps->txq_first_id = phys_id; 2295 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2296 caps->num_txq); 2297 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2298 caps->txq_first_id); 2299 break; 2300 case ICE_AQC_CAPS_MSIX: 2301 caps->num_msix_vectors = number; 2302 caps->msix_vector_first_id = phys_id; 2303 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2304 caps->num_msix_vectors); 2305 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2306 caps->msix_vector_first_id); 2307 break; 2308 case ICE_AQC_CAPS_PENDING_NVM_VER: 2309 caps->nvm_update_pending_nvm = true; 2310 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 2311 break; 2312 case ICE_AQC_CAPS_PENDING_OROM_VER: 2313 caps->nvm_update_pending_orom = true; 2314 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 2315 break; 2316 case ICE_AQC_CAPS_PENDING_NET_VER: 2317 caps->nvm_update_pending_netlist = true; 2318 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 2319 break; 2320 case ICE_AQC_CAPS_NVM_MGMT: 2321 caps->nvm_unified_update = 2322 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 2323 true : false; 2324 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2325 caps->nvm_unified_update); 2326 break; 2327 case ICE_AQC_CAPS_RDMA: 2328 caps->rdma = (number == 1); 2329 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 2330 break; 2331 case ICE_AQC_CAPS_MAX_MTU: 2332 caps->max_mtu = number; 2333 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2334 prefix, caps->max_mtu); 2335 break; 2336 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2337 caps->pcie_reset_avoidance = (number > 0); 2338 ice_debug(hw, ICE_DBG_INIT, 2339 "%s: pcie_reset_avoidance = %d\n", prefix, 2340 caps->pcie_reset_avoidance); 2341 break; 2342 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2343 caps->reset_restrict_support = (number == 1); 2344 ice_debug(hw, ICE_DBG_INIT, 2345 "%s: reset_restrict_support = %d\n", prefix, 2346 caps->reset_restrict_support); 2347 break; 2348 case ICE_AQC_CAPS_FW_LAG_SUPPORT: 2349 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG); 2350 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n", 2351 prefix, caps->roce_lag); 2352 caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG); 2353 ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", 2354 prefix, caps->sriov_lag); 2355 break; 2356 default: 2357 /* Not one of the recognized common capabilities */ 2358 found = false; 2359 } 2360 2361 return found; 2362 } 2363 2364 /** 2365 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2366 * @hw: pointer to the HW structure 2367 * @caps: pointer to capabilities structure to fix 2368 * 2369 * Re-calculate the capabilities that are dependent on the number of physical 2370 * ports; i.e. some features are not supported or function differently on 2371 * devices with more than 4 ports. 2372 */ 2373 static void 2374 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2375 { 2376 /* This assumes device capabilities are always scanned before function 2377 * capabilities during the initialization flow. 2378 */ 2379 if (hw->dev_caps.num_funcs > 4) { 2380 /* Max 4 TCs per port */ 2381 caps->maxtc = 4; 2382 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2383 caps->maxtc); 2384 if (caps->rdma) { 2385 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2386 caps->rdma = 0; 2387 } 2388 2389 /* print message only when processing device capabilities 2390 * during initialization. 2391 */ 2392 if (caps == &hw->dev_caps.common_cap) 2393 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 2394 } 2395 } 2396 2397 /** 2398 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2399 * @hw: pointer to the HW struct 2400 * @func_p: pointer to function capabilities structure 2401 * @cap: pointer to the capability element to parse 2402 * 2403 * Extract function capabilities for ICE_AQC_CAPS_VF. 2404 */ 2405 static void 2406 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2407 struct ice_aqc_list_caps_elem *cap) 2408 { 2409 u32 logical_id = le32_to_cpu(cap->logical_id); 2410 u32 number = le32_to_cpu(cap->number); 2411 2412 func_p->num_allocd_vfs = number; 2413 func_p->vf_base_id = logical_id; 2414 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2415 func_p->num_allocd_vfs); 2416 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2417 func_p->vf_base_id); 2418 } 2419 2420 /** 2421 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2422 * @hw: pointer to the HW struct 2423 * @func_p: pointer to function capabilities structure 2424 * @cap: pointer to the capability element to parse 2425 * 2426 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2427 */ 2428 static void 2429 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2430 struct ice_aqc_list_caps_elem *cap) 2431 { 2432 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2433 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2434 le32_to_cpu(cap->number)); 2435 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2436 func_p->guar_num_vsi); 2437 } 2438 2439 /** 2440 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps 2441 * @hw: pointer to the HW struct 2442 * @func_p: pointer to function capabilities structure 2443 * @cap: pointer to the capability element to parse 2444 * 2445 * Extract function capabilities for ICE_AQC_CAPS_1588. 2446 */ 2447 static void 2448 ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2449 struct ice_aqc_list_caps_elem *cap) 2450 { 2451 struct ice_ts_func_info *info = &func_p->ts_func_info; 2452 u32 number = le32_to_cpu(cap->number); 2453 2454 info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0); 2455 func_p->common_cap.ieee_1588 = info->ena; 2456 2457 info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0); 2458 info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0); 2459 info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); 2460 info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); 2461 2462 info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S; 2463 info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); 2464 2465 if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { 2466 info->time_ref = (enum ice_time_ref_freq)info->clk_freq; 2467 } else { 2468 /* Unknown clock frequency, so assume a (probably incorrect) 2469 * default to avoid out-of-bounds look ups of frequency 2470 * related information. 2471 */ 2472 ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n", 2473 info->clk_freq); 2474 info->time_ref = ICE_TIME_REF_FREQ_25_000; 2475 } 2476 2477 ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n", 2478 func_p->common_cap.ieee_1588); 2479 ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n", 2480 info->src_tmr_owned); 2481 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n", 2482 info->tmr_ena); 2483 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n", 2484 info->tmr_index_owned); 2485 ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n", 2486 info->tmr_index_assoc); 2487 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n", 2488 info->clk_freq); 2489 ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n", 2490 info->clk_src); 2491 } 2492 2493 /** 2494 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2495 * @hw: pointer to the HW struct 2496 * @func_p: pointer to function capabilities structure 2497 * 2498 * Extract function capabilities for ICE_AQC_CAPS_FD. 2499 */ 2500 static void 2501 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2502 { 2503 u32 reg_val, gsize, bsize; 2504 2505 reg_val = rd32(hw, GLQF_FD_SIZE); 2506 switch (hw->mac_type) { 2507 case ICE_MAC_E830: 2508 gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2509 bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2510 break; 2511 case ICE_MAC_E810: 2512 default: 2513 gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val); 2514 bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val); 2515 } 2516 func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize); 2517 func_p->fd_fltr_best_effort = bsize; 2518 2519 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2520 func_p->fd_fltr_guar); 2521 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2522 func_p->fd_fltr_best_effort); 2523 } 2524 2525 /** 2526 * ice_parse_func_caps - Parse function capabilities 2527 * @hw: pointer to the HW struct 2528 * @func_p: pointer to function capabilities structure 2529 * @buf: buffer containing the function capability records 2530 * @cap_count: the number of capabilities 2531 * 2532 * Helper function to parse function (0x000A) capabilities list. For 2533 * capabilities shared between device and function, this relies on 2534 * ice_parse_common_caps. 2535 * 2536 * Loop through the list of provided capabilities and extract the relevant 2537 * data into the function capabilities structured. 2538 */ 2539 static void 2540 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2541 void *buf, u32 cap_count) 2542 { 2543 struct ice_aqc_list_caps_elem *cap_resp; 2544 u32 i; 2545 2546 cap_resp = buf; 2547 2548 memset(func_p, 0, sizeof(*func_p)); 2549 2550 for (i = 0; i < cap_count; i++) { 2551 u16 cap = le16_to_cpu(cap_resp[i].cap); 2552 bool found; 2553 2554 found = ice_parse_common_caps(hw, &func_p->common_cap, 2555 &cap_resp[i], "func caps"); 2556 2557 switch (cap) { 2558 case ICE_AQC_CAPS_VF: 2559 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2560 break; 2561 case ICE_AQC_CAPS_VSI: 2562 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2563 break; 2564 case ICE_AQC_CAPS_1588: 2565 ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]); 2566 break; 2567 case ICE_AQC_CAPS_FD: 2568 ice_parse_fdir_func_caps(hw, func_p); 2569 break; 2570 default: 2571 /* Don't list common capabilities as unknown */ 2572 if (!found) 2573 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2574 i, cap); 2575 break; 2576 } 2577 } 2578 2579 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2580 } 2581 2582 /** 2583 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2584 * @hw: pointer to the HW struct 2585 * @dev_p: pointer to device capabilities structure 2586 * @cap: capability element to parse 2587 * 2588 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. 2589 */ 2590 static void 2591 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2592 struct ice_aqc_list_caps_elem *cap) 2593 { 2594 u32 number = le32_to_cpu(cap->number); 2595 2596 dev_p->num_funcs = hweight32(number); 2597 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2598 dev_p->num_funcs); 2599 } 2600 2601 /** 2602 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2603 * @hw: pointer to the HW struct 2604 * @dev_p: pointer to device capabilities structure 2605 * @cap: capability element to parse 2606 * 2607 * Parse ICE_AQC_CAPS_VF for device capabilities. 2608 */ 2609 static void 2610 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2611 struct ice_aqc_list_caps_elem *cap) 2612 { 2613 u32 number = le32_to_cpu(cap->number); 2614 2615 dev_p->num_vfs_exposed = number; 2616 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2617 dev_p->num_vfs_exposed); 2618 } 2619 2620 /** 2621 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2622 * @hw: pointer to the HW struct 2623 * @dev_p: pointer to device capabilities structure 2624 * @cap: capability element to parse 2625 * 2626 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2627 */ 2628 static void 2629 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2630 struct ice_aqc_list_caps_elem *cap) 2631 { 2632 u32 number = le32_to_cpu(cap->number); 2633 2634 dev_p->num_vsi_allocd_to_host = number; 2635 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2636 dev_p->num_vsi_allocd_to_host); 2637 } 2638 2639 /** 2640 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps 2641 * @hw: pointer to the HW struct 2642 * @dev_p: pointer to device capabilities structure 2643 * @cap: capability element to parse 2644 * 2645 * Parse ICE_AQC_CAPS_1588 for device capabilities. 2646 */ 2647 static void 2648 ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2649 struct ice_aqc_list_caps_elem *cap) 2650 { 2651 struct ice_ts_dev_info *info = &dev_p->ts_dev_info; 2652 u32 logical_id = le32_to_cpu(cap->logical_id); 2653 u32 phys_id = le32_to_cpu(cap->phys_id); 2654 u32 number = le32_to_cpu(cap->number); 2655 2656 info->ena = ((number & ICE_TS_DEV_ENA_M) != 0); 2657 dev_p->common_cap.ieee_1588 = info->ena; 2658 2659 info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M; 2660 info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0); 2661 info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0); 2662 2663 info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S; 2664 info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0); 2665 info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0); 2666 2667 info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); 2668 2669 info->ena_ports = logical_id; 2670 info->tmr_own_map = phys_id; 2671 2672 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n", 2673 dev_p->common_cap.ieee_1588); 2674 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n", 2675 info->tmr0_owner); 2676 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n", 2677 info->tmr0_owned); 2678 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n", 2679 info->tmr0_ena); 2680 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n", 2681 info->tmr1_owner); 2682 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n", 2683 info->tmr1_owned); 2684 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n", 2685 info->tmr1_ena); 2686 ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n", 2687 info->ts_ll_read); 2688 ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", 2689 info->ena_ports); 2690 ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", 2691 info->tmr_own_map); 2692 } 2693 2694 /** 2695 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2696 * @hw: pointer to the HW struct 2697 * @dev_p: pointer to device capabilities structure 2698 * @cap: capability element to parse 2699 * 2700 * Parse ICE_AQC_CAPS_FD for device capabilities. 2701 */ 2702 static void 2703 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2704 struct ice_aqc_list_caps_elem *cap) 2705 { 2706 u32 number = le32_to_cpu(cap->number); 2707 2708 dev_p->num_flow_director_fltr = number; 2709 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2710 dev_p->num_flow_director_fltr); 2711 } 2712 2713 /** 2714 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2715 * @hw: pointer to the HW struct 2716 * @dev_p: pointer to device capabilities structure 2717 * @cap: capability element to parse 2718 * 2719 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2720 * enabled sensors. 2721 */ 2722 static void 2723 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2724 struct ice_aqc_list_caps_elem *cap) 2725 { 2726 dev_p->supported_sensors = le32_to_cpu(cap->number); 2727 2728 ice_debug(hw, ICE_DBG_INIT, 2729 "dev caps: supported sensors (bitmap) = 0x%x\n", 2730 dev_p->supported_sensors); 2731 } 2732 2733 /** 2734 * ice_parse_dev_caps - Parse device capabilities 2735 * @hw: pointer to the HW struct 2736 * @dev_p: pointer to device capabilities structure 2737 * @buf: buffer containing the device capability records 2738 * @cap_count: the number of capabilities 2739 * 2740 * Helper device to parse device (0x000B) capabilities list. For 2741 * capabilities shared between device and function, this relies on 2742 * ice_parse_common_caps. 2743 * 2744 * Loop through the list of provided capabilities and extract the relevant 2745 * data into the device capabilities structured. 2746 */ 2747 static void 2748 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2749 void *buf, u32 cap_count) 2750 { 2751 struct ice_aqc_list_caps_elem *cap_resp; 2752 u32 i; 2753 2754 cap_resp = buf; 2755 2756 memset(dev_p, 0, sizeof(*dev_p)); 2757 2758 for (i = 0; i < cap_count; i++) { 2759 u16 cap = le16_to_cpu(cap_resp[i].cap); 2760 bool found; 2761 2762 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2763 &cap_resp[i], "dev caps"); 2764 2765 switch (cap) { 2766 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2767 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2768 break; 2769 case ICE_AQC_CAPS_VF: 2770 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2771 break; 2772 case ICE_AQC_CAPS_VSI: 2773 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2774 break; 2775 case ICE_AQC_CAPS_1588: 2776 ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]); 2777 break; 2778 case ICE_AQC_CAPS_FD: 2779 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2780 break; 2781 case ICE_AQC_CAPS_SENSOR_READING: 2782 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2783 break; 2784 default: 2785 /* Don't list common capabilities as unknown */ 2786 if (!found) 2787 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2788 i, cap); 2789 break; 2790 } 2791 } 2792 2793 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2794 } 2795 2796 /** 2797 * ice_is_pf_c827 - check if pf contains c827 phy 2798 * @hw: pointer to the hw struct 2799 */ 2800 bool ice_is_pf_c827(struct ice_hw *hw) 2801 { 2802 struct ice_aqc_get_link_topo cmd = {}; 2803 u8 node_part_number; 2804 u16 node_handle; 2805 int status; 2806 2807 if (hw->mac_type != ICE_MAC_E810) 2808 return false; 2809 2810 if (hw->device_id != ICE_DEV_ID_E810C_QSFP) 2811 return true; 2812 2813 cmd.addr.topo_params.node_type_ctx = 2814 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | 2815 FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); 2816 cmd.addr.topo_params.index = 0; 2817 2818 status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, 2819 &node_handle); 2820 2821 if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) 2822 return false; 2823 2824 if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) 2825 return true; 2826 2827 return false; 2828 } 2829 2830 /** 2831 * ice_is_phy_rclk_in_netlist 2832 * @hw: pointer to the hw struct 2833 * 2834 * Check if the PHY Recovered Clock device is present in the netlist 2835 */ 2836 bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) 2837 { 2838 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2839 ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && 2840 ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2841 ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) 2842 return false; 2843 2844 return true; 2845 } 2846 2847 /** 2848 * ice_is_clock_mux_in_netlist 2849 * @hw: pointer to the hw struct 2850 * 2851 * Check if the Clock Multiplexer device is present in the netlist 2852 */ 2853 bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) 2854 { 2855 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, 2856 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, 2857 NULL)) 2858 return false; 2859 2860 return true; 2861 } 2862 2863 /** 2864 * ice_is_cgu_in_netlist - check for CGU presence 2865 * @hw: pointer to the hw struct 2866 * 2867 * Check if the Clock Generation Unit (CGU) device is present in the netlist. 2868 * Save the CGU part number in the hw structure for later use. 2869 * Return: 2870 * * true - cgu is present 2871 * * false - cgu is not present 2872 */ 2873 bool ice_is_cgu_in_netlist(struct ice_hw *hw) 2874 { 2875 if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2876 ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, 2877 NULL)) { 2878 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; 2879 return true; 2880 } else if (!ice_find_netlist_node(hw, 2881 ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, 2882 ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, 2883 NULL)) { 2884 hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; 2885 return true; 2886 } 2887 2888 return false; 2889 } 2890 2891 /** 2892 * ice_is_gps_in_netlist 2893 * @hw: pointer to the hw struct 2894 * 2895 * Check if the GPS generic device is present in the netlist 2896 */ 2897 bool ice_is_gps_in_netlist(struct ice_hw *hw) 2898 { 2899 if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, 2900 ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) 2901 return false; 2902 2903 return true; 2904 } 2905 2906 /** 2907 * ice_aq_list_caps - query function/device capabilities 2908 * @hw: pointer to the HW struct 2909 * @buf: a buffer to hold the capabilities 2910 * @buf_size: size of the buffer 2911 * @cap_count: if not NULL, set to the number of capabilities reported 2912 * @opc: capabilities type to discover, device or function 2913 * @cd: pointer to command details structure or NULL 2914 * 2915 * Get the function (0x000A) or device (0x000B) capabilities description from 2916 * firmware and store it in the buffer. 2917 * 2918 * If the cap_count pointer is not NULL, then it is set to the number of 2919 * capabilities firmware will report. Note that if the buffer size is too 2920 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2921 * cap_count will still be updated in this case. It is recommended that the 2922 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2923 * firmware could return) to avoid this. 2924 */ 2925 int 2926 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2927 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2928 { 2929 struct ice_aqc_list_caps *cmd; 2930 struct ice_aq_desc desc; 2931 int status; 2932 2933 cmd = &desc.params.get_cap; 2934 2935 if (opc != ice_aqc_opc_list_func_caps && 2936 opc != ice_aqc_opc_list_dev_caps) 2937 return -EINVAL; 2938 2939 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2940 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2941 2942 if (cap_count) 2943 *cap_count = le32_to_cpu(cmd->count); 2944 2945 return status; 2946 } 2947 2948 /** 2949 * ice_discover_dev_caps - Read and extract device capabilities 2950 * @hw: pointer to the hardware structure 2951 * @dev_caps: pointer to device capabilities structure 2952 * 2953 * Read the device capabilities and extract them into the dev_caps structure 2954 * for later use. 2955 */ 2956 int 2957 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2958 { 2959 u32 cap_count = 0; 2960 void *cbuf; 2961 int status; 2962 2963 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2964 if (!cbuf) 2965 return -ENOMEM; 2966 2967 /* Although the driver doesn't know the number of capabilities the 2968 * device will return, we can simply send a 4KB buffer, the maximum 2969 * possible size that firmware can return. 2970 */ 2971 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2972 2973 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2974 ice_aqc_opc_list_dev_caps, NULL); 2975 if (!status) 2976 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2977 kfree(cbuf); 2978 2979 return status; 2980 } 2981 2982 /** 2983 * ice_discover_func_caps - Read and extract function capabilities 2984 * @hw: pointer to the hardware structure 2985 * @func_caps: pointer to function capabilities structure 2986 * 2987 * Read the function capabilities and extract them into the func_caps structure 2988 * for later use. 2989 */ 2990 static int 2991 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2992 { 2993 u32 cap_count = 0; 2994 void *cbuf; 2995 int status; 2996 2997 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2998 if (!cbuf) 2999 return -ENOMEM; 3000 3001 /* Although the driver doesn't know the number of capabilities the 3002 * device will return, we can simply send a 4KB buffer, the maximum 3003 * possible size that firmware can return. 3004 */ 3005 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 3006 3007 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 3008 ice_aqc_opc_list_func_caps, NULL); 3009 if (!status) 3010 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 3011 kfree(cbuf); 3012 3013 return status; 3014 } 3015 3016 /** 3017 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 3018 * @hw: pointer to the hardware structure 3019 */ 3020 void ice_set_safe_mode_caps(struct ice_hw *hw) 3021 { 3022 struct ice_hw_func_caps *func_caps = &hw->func_caps; 3023 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 3024 struct ice_hw_common_caps cached_caps; 3025 u32 num_funcs; 3026 3027 /* cache some func_caps values that should be restored after memset */ 3028 cached_caps = func_caps->common_cap; 3029 3030 /* unset func capabilities */ 3031 memset(func_caps, 0, sizeof(*func_caps)); 3032 3033 #define ICE_RESTORE_FUNC_CAP(name) \ 3034 func_caps->common_cap.name = cached_caps.name 3035 3036 /* restore cached values */ 3037 ICE_RESTORE_FUNC_CAP(valid_functions); 3038 ICE_RESTORE_FUNC_CAP(txq_first_id); 3039 ICE_RESTORE_FUNC_CAP(rxq_first_id); 3040 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 3041 ICE_RESTORE_FUNC_CAP(max_mtu); 3042 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 3043 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 3044 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 3045 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 3046 3047 /* one Tx and one Rx queue in safe mode */ 3048 func_caps->common_cap.num_rxq = 1; 3049 func_caps->common_cap.num_txq = 1; 3050 3051 /* two MSIX vectors, one for traffic and one for misc causes */ 3052 func_caps->common_cap.num_msix_vectors = 2; 3053 func_caps->guar_num_vsi = 1; 3054 3055 /* cache some dev_caps values that should be restored after memset */ 3056 cached_caps = dev_caps->common_cap; 3057 num_funcs = dev_caps->num_funcs; 3058 3059 /* unset dev capabilities */ 3060 memset(dev_caps, 0, sizeof(*dev_caps)); 3061 3062 #define ICE_RESTORE_DEV_CAP(name) \ 3063 dev_caps->common_cap.name = cached_caps.name 3064 3065 /* restore cached values */ 3066 ICE_RESTORE_DEV_CAP(valid_functions); 3067 ICE_RESTORE_DEV_CAP(txq_first_id); 3068 ICE_RESTORE_DEV_CAP(rxq_first_id); 3069 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 3070 ICE_RESTORE_DEV_CAP(max_mtu); 3071 ICE_RESTORE_DEV_CAP(nvm_unified_update); 3072 ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 3073 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 3074 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 3075 dev_caps->num_funcs = num_funcs; 3076 3077 /* one Tx and one Rx queue per function in safe mode */ 3078 dev_caps->common_cap.num_rxq = num_funcs; 3079 dev_caps->common_cap.num_txq = num_funcs; 3080 3081 /* two MSIX vectors per function */ 3082 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 3083 } 3084 3085 /** 3086 * ice_get_caps - get info about the HW 3087 * @hw: pointer to the hardware structure 3088 */ 3089 int ice_get_caps(struct ice_hw *hw) 3090 { 3091 int status; 3092 3093 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3094 if (status) 3095 return status; 3096 3097 return ice_discover_func_caps(hw, &hw->func_caps); 3098 } 3099 3100 /** 3101 * ice_aq_manage_mac_write - manage MAC address write command 3102 * @hw: pointer to the HW struct 3103 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3104 * @flags: flags to control write behavior 3105 * @cd: pointer to command details structure or NULL 3106 * 3107 * This function is used to write MAC address to the NVM (0x0108). 3108 */ 3109 int 3110 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3111 struct ice_sq_cd *cd) 3112 { 3113 struct ice_aqc_manage_mac_write *cmd; 3114 struct ice_aq_desc desc; 3115 3116 cmd = &desc.params.mac_write; 3117 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3118 3119 cmd->flags = flags; 3120 ether_addr_copy(cmd->mac_addr, mac_addr); 3121 3122 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3123 } 3124 3125 /** 3126 * ice_aq_clear_pxe_mode 3127 * @hw: pointer to the HW struct 3128 * 3129 * Tell the firmware that the driver is taking over from PXE (0x0110). 3130 */ 3131 static int ice_aq_clear_pxe_mode(struct ice_hw *hw) 3132 { 3133 struct ice_aq_desc desc; 3134 3135 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3136 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3137 3138 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3139 } 3140 3141 /** 3142 * ice_clear_pxe_mode - clear pxe operations mode 3143 * @hw: pointer to the HW struct 3144 * 3145 * Make sure all PXE mode settings are cleared, including things 3146 * like descriptor fetch/write-back mode. 3147 */ 3148 void ice_clear_pxe_mode(struct ice_hw *hw) 3149 { 3150 if (ice_check_sq_alive(hw, &hw->adminq)) 3151 ice_aq_clear_pxe_mode(hw); 3152 } 3153 3154 /** 3155 * ice_aq_set_port_params - set physical port parameters. 3156 * @pi: pointer to the port info struct 3157 * @double_vlan: if set double VLAN is enabled 3158 * @cd: pointer to command details structure or NULL 3159 * 3160 * Set Physical port parameters (0x0203) 3161 */ 3162 int 3163 ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, 3164 struct ice_sq_cd *cd) 3165 3166 { 3167 struct ice_aqc_set_port_params *cmd; 3168 struct ice_hw *hw = pi->hw; 3169 struct ice_aq_desc desc; 3170 u16 cmd_flags = 0; 3171 3172 cmd = &desc.params.set_port_params; 3173 3174 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3175 if (double_vlan) 3176 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3177 cmd->cmd_flags = cpu_to_le16(cmd_flags); 3178 3179 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3180 } 3181 3182 /** 3183 * ice_is_100m_speed_supported 3184 * @hw: pointer to the HW struct 3185 * 3186 * returns true if 100M speeds are supported by the device, 3187 * false otherwise. 3188 */ 3189 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3190 { 3191 switch (hw->device_id) { 3192 case ICE_DEV_ID_E822C_SGMII: 3193 case ICE_DEV_ID_E822L_SGMII: 3194 case ICE_DEV_ID_E823L_1GBE: 3195 case ICE_DEV_ID_E823C_SGMII: 3196 return true; 3197 default: 3198 return false; 3199 } 3200 } 3201 3202 /** 3203 * ice_get_link_speed_based_on_phy_type - returns link speed 3204 * @phy_type_low: lower part of phy_type 3205 * @phy_type_high: higher part of phy_type 3206 * 3207 * This helper function will convert an entry in PHY type structure 3208 * [phy_type_low, phy_type_high] to its corresponding link speed. 3209 * Note: In the structure of [phy_type_low, phy_type_high], there should 3210 * be one bit set, as this function will convert one PHY type to its 3211 * speed. 3212 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3213 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3214 */ 3215 static u16 3216 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3217 { 3218 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3219 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3220 3221 switch (phy_type_low) { 3222 case ICE_PHY_TYPE_LOW_100BASE_TX: 3223 case ICE_PHY_TYPE_LOW_100M_SGMII: 3224 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3225 break; 3226 case ICE_PHY_TYPE_LOW_1000BASE_T: 3227 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3228 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3229 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3230 case ICE_PHY_TYPE_LOW_1G_SGMII: 3231 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3232 break; 3233 case ICE_PHY_TYPE_LOW_2500BASE_T: 3234 case ICE_PHY_TYPE_LOW_2500BASE_X: 3235 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3236 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3237 break; 3238 case ICE_PHY_TYPE_LOW_5GBASE_T: 3239 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3240 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3241 break; 3242 case ICE_PHY_TYPE_LOW_10GBASE_T: 3243 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3244 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3245 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3246 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3247 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3248 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3249 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3250 break; 3251 case ICE_PHY_TYPE_LOW_25GBASE_T: 3252 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3253 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3254 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3255 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3256 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3257 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3258 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3259 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3260 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3261 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3262 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3263 break; 3264 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3265 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3266 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3267 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3268 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3269 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3270 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3271 break; 3272 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3273 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3274 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3275 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3276 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3277 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3278 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3279 case ICE_PHY_TYPE_LOW_50G_AUI2: 3280 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3281 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3282 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3283 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3284 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3285 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3286 case ICE_PHY_TYPE_LOW_50G_AUI1: 3287 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3288 break; 3289 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3290 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3291 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3292 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3293 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3294 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3295 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3296 case ICE_PHY_TYPE_LOW_100G_AUI4: 3297 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3298 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3299 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3300 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3301 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3302 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3303 break; 3304 default: 3305 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3306 break; 3307 } 3308 3309 switch (phy_type_high) { 3310 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 3311 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 3312 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 3313 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 3314 case ICE_PHY_TYPE_HIGH_100G_AUI2: 3315 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 3316 break; 3317 default: 3318 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3319 break; 3320 } 3321 3322 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 3323 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3324 return ICE_AQ_LINK_SPEED_UNKNOWN; 3325 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3326 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 3327 return ICE_AQ_LINK_SPEED_UNKNOWN; 3328 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3329 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3330 return speed_phy_type_low; 3331 else 3332 return speed_phy_type_high; 3333 } 3334 3335 /** 3336 * ice_update_phy_type 3337 * @phy_type_low: pointer to the lower part of phy_type 3338 * @phy_type_high: pointer to the higher part of phy_type 3339 * @link_speeds_bitmap: targeted link speeds bitmap 3340 * 3341 * Note: For the link_speeds_bitmap structure, you can check it at 3342 * [ice_aqc_get_link_status->link_speed]. Caller can pass in 3343 * link_speeds_bitmap include multiple speeds. 3344 * 3345 * Each entry in this [phy_type_low, phy_type_high] structure will 3346 * present a certain link speed. This helper function will turn on bits 3347 * in [phy_type_low, phy_type_high] structure based on the value of 3348 * link_speeds_bitmap input parameter. 3349 */ 3350 void 3351 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3352 u16 link_speeds_bitmap) 3353 { 3354 u64 pt_high; 3355 u64 pt_low; 3356 int index; 3357 u16 speed; 3358 3359 /* We first check with low part of phy_type */ 3360 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3361 pt_low = BIT_ULL(index); 3362 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3363 3364 if (link_speeds_bitmap & speed) 3365 *phy_type_low |= BIT_ULL(index); 3366 } 3367 3368 /* We then check with high part of phy_type */ 3369 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3370 pt_high = BIT_ULL(index); 3371 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3372 3373 if (link_speeds_bitmap & speed) 3374 *phy_type_high |= BIT_ULL(index); 3375 } 3376 } 3377 3378 /** 3379 * ice_aq_set_phy_cfg 3380 * @hw: pointer to the HW struct 3381 * @pi: port info structure of the interested logical port 3382 * @cfg: structure with PHY configuration data to be set 3383 * @cd: pointer to command details structure or NULL 3384 * 3385 * Set the various PHY configuration parameters supported on the Port. 3386 * One or more of the Set PHY config parameters may be ignored in an MFP 3387 * mode as the PF may not have the privilege to set some of the PHY Config 3388 * parameters. This status will be indicated by the command response (0x0601). 3389 */ 3390 int 3391 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3392 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3393 { 3394 struct ice_aq_desc desc; 3395 int status; 3396 3397 if (!cfg) 3398 return -EINVAL; 3399 3400 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3401 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3402 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3403 cfg->caps); 3404 3405 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3406 } 3407 3408 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3409 desc.params.set_phy.lport_num = pi->lport; 3410 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3411 3412 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3413 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3414 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 3415 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3416 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 3417 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3418 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3419 cfg->low_power_ctrl_an); 3420 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3421 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3422 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3423 cfg->link_fec_opt); 3424 3425 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3426 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3427 status = 0; 3428 3429 if (!status) 3430 pi->phy.curr_user_phy_cfg = *cfg; 3431 3432 return status; 3433 } 3434 3435 /** 3436 * ice_update_link_info - update status of the HW network link 3437 * @pi: port info structure of the interested logical port 3438 */ 3439 int ice_update_link_info(struct ice_port_info *pi) 3440 { 3441 struct ice_link_status *li; 3442 int status; 3443 3444 if (!pi) 3445 return -EINVAL; 3446 3447 li = &pi->phy.link_info; 3448 3449 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3450 if (status) 3451 return status; 3452 3453 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3454 struct ice_aqc_get_phy_caps_data *pcaps; 3455 struct ice_hw *hw; 3456 3457 hw = pi->hw; 3458 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 3459 GFP_KERNEL); 3460 if (!pcaps) 3461 return -ENOMEM; 3462 3463 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3464 pcaps, NULL); 3465 3466 devm_kfree(ice_hw_to_dev(hw), pcaps); 3467 } 3468 3469 return status; 3470 } 3471 3472 /** 3473 * ice_cache_phy_user_req 3474 * @pi: port information structure 3475 * @cache_data: PHY logging data 3476 * @cache_mode: PHY logging mode 3477 * 3478 * Log the user request on (FC, FEC, SPEED) for later use. 3479 */ 3480 static void 3481 ice_cache_phy_user_req(struct ice_port_info *pi, 3482 struct ice_phy_cache_mode_data cache_data, 3483 enum ice_phy_cache_mode cache_mode) 3484 { 3485 if (!pi) 3486 return; 3487 3488 switch (cache_mode) { 3489 case ICE_FC_MODE: 3490 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3491 break; 3492 case ICE_SPEED_MODE: 3493 pi->phy.curr_user_speed_req = 3494 cache_data.data.curr_user_speed_req; 3495 break; 3496 case ICE_FEC_MODE: 3497 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3498 break; 3499 default: 3500 break; 3501 } 3502 } 3503 3504 /** 3505 * ice_caps_to_fc_mode 3506 * @caps: PHY capabilities 3507 * 3508 * Convert PHY FC capabilities to ice FC mode 3509 */ 3510 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3511 { 3512 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3513 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3514 return ICE_FC_FULL; 3515 3516 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3517 return ICE_FC_TX_PAUSE; 3518 3519 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3520 return ICE_FC_RX_PAUSE; 3521 3522 return ICE_FC_NONE; 3523 } 3524 3525 /** 3526 * ice_caps_to_fec_mode 3527 * @caps: PHY capabilities 3528 * @fec_options: Link FEC options 3529 * 3530 * Convert PHY FEC capabilities to ice FEC mode 3531 */ 3532 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3533 { 3534 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 3535 return ICE_FEC_AUTO; 3536 3537 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3538 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3539 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3540 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3541 return ICE_FEC_BASER; 3542 3543 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3544 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3545 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3546 return ICE_FEC_RS; 3547 3548 return ICE_FEC_NONE; 3549 } 3550 3551 /** 3552 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3553 * @pi: port information structure 3554 * @cfg: PHY configuration data to set FC mode 3555 * @req_mode: FC mode to configure 3556 */ 3557 int 3558 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3559 enum ice_fc_mode req_mode) 3560 { 3561 struct ice_phy_cache_mode_data cache_data; 3562 u8 pause_mask = 0x0; 3563 3564 if (!pi || !cfg) 3565 return -EINVAL; 3566 3567 switch (req_mode) { 3568 case ICE_FC_FULL: 3569 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3570 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3571 break; 3572 case ICE_FC_RX_PAUSE: 3573 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3574 break; 3575 case ICE_FC_TX_PAUSE: 3576 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3577 break; 3578 default: 3579 break; 3580 } 3581 3582 /* clear the old pause settings */ 3583 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3584 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3585 3586 /* set the new capabilities */ 3587 cfg->caps |= pause_mask; 3588 3589 /* Cache user FC request */ 3590 cache_data.data.curr_user_fc_req = req_mode; 3591 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3592 3593 return 0; 3594 } 3595 3596 /** 3597 * ice_set_fc 3598 * @pi: port information structure 3599 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3600 * @ena_auto_link_update: enable automatic link update 3601 * 3602 * Set the requested flow control mode. 3603 */ 3604 int 3605 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3606 { 3607 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3608 struct ice_aqc_get_phy_caps_data *pcaps; 3609 struct ice_hw *hw; 3610 int status; 3611 3612 if (!pi || !aq_failures) 3613 return -EINVAL; 3614 3615 *aq_failures = 0; 3616 hw = pi->hw; 3617 3618 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 3619 if (!pcaps) 3620 return -ENOMEM; 3621 3622 /* Get the current PHY config */ 3623 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3624 pcaps, NULL); 3625 if (status) { 3626 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3627 goto out; 3628 } 3629 3630 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3631 3632 /* Configure the set PHY data */ 3633 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3634 if (status) 3635 goto out; 3636 3637 /* If the capabilities have changed, then set the new config */ 3638 if (cfg.caps != pcaps->caps) { 3639 int retry_count, retry_max = 10; 3640 3641 /* Auto restart link so settings take effect */ 3642 if (ena_auto_link_update) 3643 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3644 3645 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3646 if (status) { 3647 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3648 goto out; 3649 } 3650 3651 /* Update the link info 3652 * It sometimes takes a really long time for link to 3653 * come back from the atomic reset. Thus, we wait a 3654 * little bit. 3655 */ 3656 for (retry_count = 0; retry_count < retry_max; retry_count++) { 3657 status = ice_update_link_info(pi); 3658 3659 if (!status) 3660 break; 3661 3662 mdelay(100); 3663 } 3664 3665 if (status) 3666 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 3667 } 3668 3669 out: 3670 devm_kfree(ice_hw_to_dev(hw), pcaps); 3671 return status; 3672 } 3673 3674 /** 3675 * ice_phy_caps_equals_cfg 3676 * @phy_caps: PHY capabilities 3677 * @phy_cfg: PHY configuration 3678 * 3679 * Helper function to determine if PHY capabilities matches PHY 3680 * configuration 3681 */ 3682 bool 3683 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 3684 struct ice_aqc_set_phy_cfg_data *phy_cfg) 3685 { 3686 u8 caps_mask, cfg_mask; 3687 3688 if (!phy_caps || !phy_cfg) 3689 return false; 3690 3691 /* These bits are not common between capabilities and configuration. 3692 * Do not use them to determine equality. 3693 */ 3694 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 3695 ICE_AQC_GET_PHY_EN_MOD_QUAL); 3696 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3697 3698 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 3699 phy_caps->phy_type_high != phy_cfg->phy_type_high || 3700 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 3701 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 3702 phy_caps->eee_cap != phy_cfg->eee_cap || 3703 phy_caps->eeer_value != phy_cfg->eeer_value || 3704 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3705 return false; 3706 3707 return true; 3708 } 3709 3710 /** 3711 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3712 * @pi: port information structure 3713 * @caps: PHY ability structure to copy date from 3714 * @cfg: PHY configuration structure to copy data to 3715 * 3716 * Helper function to copy AQC PHY get ability data to PHY set configuration 3717 * data structure 3718 */ 3719 void 3720 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3721 struct ice_aqc_get_phy_caps_data *caps, 3722 struct ice_aqc_set_phy_cfg_data *cfg) 3723 { 3724 if (!pi || !caps || !cfg) 3725 return; 3726 3727 memset(cfg, 0, sizeof(*cfg)); 3728 cfg->phy_type_low = caps->phy_type_low; 3729 cfg->phy_type_high = caps->phy_type_high; 3730 cfg->caps = caps->caps; 3731 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3732 cfg->eee_cap = caps->eee_cap; 3733 cfg->eeer_value = caps->eeer_value; 3734 cfg->link_fec_opt = caps->link_fec_options; 3735 cfg->module_compliance_enforcement = 3736 caps->module_compliance_enforcement; 3737 } 3738 3739 /** 3740 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3741 * @pi: port information structure 3742 * @cfg: PHY configuration data to set FEC mode 3743 * @fec: FEC mode to configure 3744 */ 3745 int 3746 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3747 enum ice_fec_mode fec) 3748 { 3749 struct ice_aqc_get_phy_caps_data *pcaps; 3750 struct ice_hw *hw; 3751 int status; 3752 3753 if (!pi || !cfg) 3754 return -EINVAL; 3755 3756 hw = pi->hw; 3757 3758 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3759 if (!pcaps) 3760 return -ENOMEM; 3761 3762 status = ice_aq_get_phy_caps(pi, false, 3763 (ice_fw_supports_report_dflt_cfg(hw) ? 3764 ICE_AQC_REPORT_DFLT_CFG : 3765 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3766 if (status) 3767 goto out; 3768 3769 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3770 cfg->link_fec_opt = pcaps->link_fec_options; 3771 3772 switch (fec) { 3773 case ICE_FEC_BASER: 3774 /* Clear RS bits, and AND BASE-R ability 3775 * bits and OR request bits. 3776 */ 3777 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3778 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3779 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3780 ICE_AQC_PHY_FEC_25G_KR_REQ; 3781 break; 3782 case ICE_FEC_RS: 3783 /* Clear BASE-R bits, and AND RS ability 3784 * bits and OR request bits. 3785 */ 3786 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3787 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3788 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3789 break; 3790 case ICE_FEC_NONE: 3791 /* Clear all FEC option bits. */ 3792 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3793 break; 3794 case ICE_FEC_AUTO: 3795 /* AND auto FEC bit, and all caps bits. */ 3796 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3797 cfg->link_fec_opt |= pcaps->link_fec_options; 3798 break; 3799 default: 3800 status = -EINVAL; 3801 break; 3802 } 3803 3804 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3805 !ice_fw_supports_report_dflt_cfg(hw)) { 3806 struct ice_link_default_override_tlv tlv = { 0 }; 3807 3808 status = ice_get_link_default_override(&tlv, pi); 3809 if (status) 3810 goto out; 3811 3812 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3813 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3814 cfg->link_fec_opt = tlv.fec_options; 3815 } 3816 3817 out: 3818 kfree(pcaps); 3819 3820 return status; 3821 } 3822 3823 /** 3824 * ice_get_link_status - get status of the HW network link 3825 * @pi: port information structure 3826 * @link_up: pointer to bool (true/false = linkup/linkdown) 3827 * 3828 * Variable link_up is true if link is up, false if link is down. 3829 * The variable link_up is invalid if status is non zero. As a 3830 * result of this call, link status reporting becomes enabled 3831 */ 3832 int ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3833 { 3834 struct ice_phy_info *phy_info; 3835 int status = 0; 3836 3837 if (!pi || !link_up) 3838 return -EINVAL; 3839 3840 phy_info = &pi->phy; 3841 3842 if (phy_info->get_link_info) { 3843 status = ice_update_link_info(pi); 3844 3845 if (status) 3846 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3847 status); 3848 } 3849 3850 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3851 3852 return status; 3853 } 3854 3855 /** 3856 * ice_aq_set_link_restart_an 3857 * @pi: pointer to the port information structure 3858 * @ena_link: if true: enable link, if false: disable link 3859 * @cd: pointer to command details structure or NULL 3860 * 3861 * Sets up the link and restarts the Auto-Negotiation over the link. 3862 */ 3863 int 3864 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3865 struct ice_sq_cd *cd) 3866 { 3867 struct ice_aqc_restart_an *cmd; 3868 struct ice_aq_desc desc; 3869 3870 cmd = &desc.params.restart_an; 3871 3872 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3873 3874 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3875 cmd->lport_num = pi->lport; 3876 if (ena_link) 3877 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3878 else 3879 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3880 3881 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3882 } 3883 3884 /** 3885 * ice_aq_set_event_mask 3886 * @hw: pointer to the HW struct 3887 * @port_num: port number of the physical function 3888 * @mask: event mask to be set 3889 * @cd: pointer to command details structure or NULL 3890 * 3891 * Set event mask (0x0613) 3892 */ 3893 int 3894 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3895 struct ice_sq_cd *cd) 3896 { 3897 struct ice_aqc_set_event_mask *cmd; 3898 struct ice_aq_desc desc; 3899 3900 cmd = &desc.params.set_event_mask; 3901 3902 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3903 3904 cmd->lport_num = port_num; 3905 3906 cmd->event_mask = cpu_to_le16(mask); 3907 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3908 } 3909 3910 /** 3911 * ice_aq_set_mac_loopback 3912 * @hw: pointer to the HW struct 3913 * @ena_lpbk: Enable or Disable loopback 3914 * @cd: pointer to command details structure or NULL 3915 * 3916 * Enable/disable loopback on a given port 3917 */ 3918 int 3919 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3920 { 3921 struct ice_aqc_set_mac_lb *cmd; 3922 struct ice_aq_desc desc; 3923 3924 cmd = &desc.params.set_mac_lb; 3925 3926 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3927 if (ena_lpbk) 3928 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3929 3930 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3931 } 3932 3933 /** 3934 * ice_aq_set_port_id_led 3935 * @pi: pointer to the port information 3936 * @is_orig_mode: is this LED set to original mode (by the net-list) 3937 * @cd: pointer to command details structure or NULL 3938 * 3939 * Set LED value for the given port (0x06e9) 3940 */ 3941 int 3942 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3943 struct ice_sq_cd *cd) 3944 { 3945 struct ice_aqc_set_port_id_led *cmd; 3946 struct ice_hw *hw = pi->hw; 3947 struct ice_aq_desc desc; 3948 3949 cmd = &desc.params.set_port_id_led; 3950 3951 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3952 3953 if (is_orig_mode) 3954 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3955 else 3956 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3957 3958 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3959 } 3960 3961 /** 3962 * ice_aq_get_port_options 3963 * @hw: pointer to the HW struct 3964 * @options: buffer for the resultant port options 3965 * @option_count: input - size of the buffer in port options structures, 3966 * output - number of returned port options 3967 * @lport: logical port to call the command with (optional) 3968 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 3969 * when PF owns more than 1 port it must be true 3970 * @active_option_idx: index of active port option in returned buffer 3971 * @active_option_valid: active option in returned buffer is valid 3972 * @pending_option_idx: index of pending port option in returned buffer 3973 * @pending_option_valid: pending option in returned buffer is valid 3974 * 3975 * Calls Get Port Options AQC (0x06ea) and verifies result. 3976 */ 3977 int 3978 ice_aq_get_port_options(struct ice_hw *hw, 3979 struct ice_aqc_get_port_options_elem *options, 3980 u8 *option_count, u8 lport, bool lport_valid, 3981 u8 *active_option_idx, bool *active_option_valid, 3982 u8 *pending_option_idx, bool *pending_option_valid) 3983 { 3984 struct ice_aqc_get_port_options *cmd; 3985 struct ice_aq_desc desc; 3986 int status; 3987 u8 i; 3988 3989 /* options buffer shall be able to hold max returned options */ 3990 if (*option_count < ICE_AQC_PORT_OPT_COUNT_M) 3991 return -EINVAL; 3992 3993 cmd = &desc.params.get_port_options; 3994 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options); 3995 3996 if (lport_valid) 3997 cmd->lport_num = lport; 3998 cmd->lport_num_valid = lport_valid; 3999 4000 status = ice_aq_send_cmd(hw, &desc, options, 4001 *option_count * sizeof(*options), NULL); 4002 if (status) 4003 return status; 4004 4005 /* verify direct FW response & set output parameters */ 4006 *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M, 4007 cmd->port_options_count); 4008 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count); 4009 *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID, 4010 cmd->port_options); 4011 if (*active_option_valid) { 4012 *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M, 4013 cmd->port_options); 4014 if (*active_option_idx > (*option_count - 1)) 4015 return -EIO; 4016 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n", 4017 *active_option_idx); 4018 } 4019 4020 *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID, 4021 cmd->pending_port_option_status); 4022 if (*pending_option_valid) { 4023 *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M, 4024 cmd->pending_port_option_status); 4025 if (*pending_option_idx > (*option_count - 1)) 4026 return -EIO; 4027 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n", 4028 *pending_option_idx); 4029 } 4030 4031 /* mask output options fields */ 4032 for (i = 0; i < *option_count; i++) { 4033 options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M, 4034 options[i].pmd); 4035 options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M, 4036 options[i].max_lane_speed); 4037 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n", 4038 options[i].pmd, options[i].max_lane_speed); 4039 } 4040 4041 return 0; 4042 } 4043 4044 /** 4045 * ice_aq_set_port_option 4046 * @hw: pointer to the HW struct 4047 * @lport: logical port to call the command with 4048 * @lport_valid: when false, FW uses port owned by the PF instead of lport, 4049 * when PF owns more than 1 port it must be true 4050 * @new_option: new port option to be written 4051 * 4052 * Calls Set Port Options AQC (0x06eb). 4053 */ 4054 int 4055 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, 4056 u8 new_option) 4057 { 4058 struct ice_aqc_set_port_option *cmd; 4059 struct ice_aq_desc desc; 4060 4061 if (new_option > ICE_AQC_PORT_OPT_COUNT_M) 4062 return -EINVAL; 4063 4064 cmd = &desc.params.set_port_option; 4065 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option); 4066 4067 if (lport_valid) 4068 cmd->lport_num = lport; 4069 4070 cmd->lport_num_valid = lport_valid; 4071 cmd->selected_port_option = new_option; 4072 4073 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 4074 } 4075 4076 /** 4077 * ice_aq_sff_eeprom 4078 * @hw: pointer to the HW struct 4079 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 4080 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 4081 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 4082 * @page: QSFP page 4083 * @set_page: set or ignore the page 4084 * @data: pointer to data buffer to be read/written to the I2C device. 4085 * @length: 1-16 for read, 1 for write. 4086 * @write: 0 read, 1 for write. 4087 * @cd: pointer to command details structure or NULL 4088 * 4089 * Read/Write SFF EEPROM (0x06EE) 4090 */ 4091 int 4092 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 4093 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 4094 bool write, struct ice_sq_cd *cd) 4095 { 4096 struct ice_aqc_sff_eeprom *cmd; 4097 struct ice_aq_desc desc; 4098 int status; 4099 4100 if (!data || (mem_addr & 0xff00)) 4101 return -EINVAL; 4102 4103 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 4104 cmd = &desc.params.read_write_sff_param; 4105 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 4106 cmd->lport_num = (u8)(lport & 0xff); 4107 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 4108 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 4109 ICE_AQC_SFF_I2CBUS_7BIT_M) | 4110 ((set_page << 4111 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 4112 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 4113 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 4114 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 4115 if (write) 4116 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 4117 4118 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 4119 return status; 4120 } 4121 4122 static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type) 4123 { 4124 switch (type) { 4125 case ICE_LUT_VSI: 4126 return ICE_LUT_VSI_SIZE; 4127 case ICE_LUT_GLOBAL: 4128 return ICE_LUT_GLOBAL_SIZE; 4129 case ICE_LUT_PF: 4130 return ICE_LUT_PF_SIZE; 4131 } 4132 WARN_ONCE(1, "incorrect type passed"); 4133 return ICE_LUT_VSI_SIZE; 4134 } 4135 4136 static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size) 4137 { 4138 switch (size) { 4139 case ICE_LUT_VSI_SIZE: 4140 return ICE_AQC_LUT_SIZE_SMALL; 4141 case ICE_LUT_GLOBAL_SIZE: 4142 return ICE_AQC_LUT_SIZE_512; 4143 case ICE_LUT_PF_SIZE: 4144 return ICE_AQC_LUT_SIZE_2K; 4145 } 4146 WARN_ONCE(1, "incorrect size passed"); 4147 return 0; 4148 } 4149 4150 /** 4151 * __ice_aq_get_set_rss_lut 4152 * @hw: pointer to the hardware structure 4153 * @params: RSS LUT parameters 4154 * @set: set true to set the table, false to get the table 4155 * 4156 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4157 */ 4158 static int 4159 __ice_aq_get_set_rss_lut(struct ice_hw *hw, 4160 struct ice_aq_get_set_rss_lut_params *params, bool set) 4161 { 4162 u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0; 4163 enum ice_lut_type lut_type = params->lut_type; 4164 struct ice_aqc_get_set_rss_lut *desc_params; 4165 enum ice_aqc_lut_flags flags; 4166 enum ice_lut_size lut_size; 4167 struct ice_aq_desc desc; 4168 u8 *lut = params->lut; 4169 4170 4171 if (!lut || !ice_is_vsi_valid(hw, vsi_handle)) 4172 return -EINVAL; 4173 4174 lut_size = ice_lut_type_to_size(lut_type); 4175 if (lut_size > params->lut_size) 4176 return -EINVAL; 4177 else if (set && lut_size != params->lut_size) 4178 return -EINVAL; 4179 4180 opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut; 4181 ice_fill_dflt_direct_cmd_desc(&desc, opcode); 4182 if (set) 4183 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4184 4185 desc_params = &desc.params.get_set_rss_lut; 4186 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4187 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4188 4189 if (lut_type == ICE_LUT_GLOBAL) 4190 glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 4191 params->global_lut_id); 4192 4193 flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size); 4194 desc_params->flags = cpu_to_le16(flags); 4195 4196 return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4197 } 4198 4199 /** 4200 * ice_aq_get_rss_lut 4201 * @hw: pointer to the hardware structure 4202 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4203 * 4204 * get the RSS lookup table, PF or VSI type 4205 */ 4206 int 4207 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4208 { 4209 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4210 } 4211 4212 /** 4213 * ice_aq_set_rss_lut 4214 * @hw: pointer to the hardware structure 4215 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4216 * 4217 * set the RSS lookup table, PF or VSI type 4218 */ 4219 int 4220 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4221 { 4222 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4223 } 4224 4225 /** 4226 * __ice_aq_get_set_rss_key 4227 * @hw: pointer to the HW struct 4228 * @vsi_id: VSI FW index 4229 * @key: pointer to key info struct 4230 * @set: set true to set the key, false to get the key 4231 * 4232 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4233 */ 4234 static int 4235 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4236 struct ice_aqc_get_set_rss_keys *key, bool set) 4237 { 4238 struct ice_aqc_get_set_rss_key *desc_params; 4239 u16 key_size = sizeof(*key); 4240 struct ice_aq_desc desc; 4241 4242 if (set) { 4243 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4244 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4245 } else { 4246 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4247 } 4248 4249 desc_params = &desc.params.get_set_rss_key; 4250 desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID); 4251 4252 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4253 } 4254 4255 /** 4256 * ice_aq_get_rss_key 4257 * @hw: pointer to the HW struct 4258 * @vsi_handle: software VSI handle 4259 * @key: pointer to key info struct 4260 * 4261 * get the RSS key per VSI 4262 */ 4263 int 4264 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4265 struct ice_aqc_get_set_rss_keys *key) 4266 { 4267 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4268 return -EINVAL; 4269 4270 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4271 key, false); 4272 } 4273 4274 /** 4275 * ice_aq_set_rss_key 4276 * @hw: pointer to the HW struct 4277 * @vsi_handle: software VSI handle 4278 * @keys: pointer to key info struct 4279 * 4280 * set the RSS key per VSI 4281 */ 4282 int 4283 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4284 struct ice_aqc_get_set_rss_keys *keys) 4285 { 4286 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4287 return -EINVAL; 4288 4289 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4290 keys, true); 4291 } 4292 4293 /** 4294 * ice_aq_add_lan_txq 4295 * @hw: pointer to the hardware structure 4296 * @num_qgrps: Number of added queue groups 4297 * @qg_list: list of queue groups to be added 4298 * @buf_size: size of buffer for indirect command 4299 * @cd: pointer to command details structure or NULL 4300 * 4301 * Add Tx LAN queue (0x0C30) 4302 * 4303 * NOTE: 4304 * Prior to calling add Tx LAN queue: 4305 * Initialize the following as part of the Tx queue context: 4306 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4307 * Cache profile and Packet shaper profile. 4308 * 4309 * After add Tx LAN queue AQ command is completed: 4310 * Interrupts should be associated with specific queues, 4311 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4312 * flow. 4313 */ 4314 static int 4315 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4316 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4317 struct ice_sq_cd *cd) 4318 { 4319 struct ice_aqc_add_tx_qgrp *list; 4320 struct ice_aqc_add_txqs *cmd; 4321 struct ice_aq_desc desc; 4322 u16 i, sum_size = 0; 4323 4324 cmd = &desc.params.add_txqs; 4325 4326 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4327 4328 if (!qg_list) 4329 return -EINVAL; 4330 4331 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4332 return -EINVAL; 4333 4334 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4335 sum_size += struct_size(list, txqs, list->num_txqs); 4336 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4337 list->num_txqs); 4338 } 4339 4340 if (buf_size != sum_size) 4341 return -EINVAL; 4342 4343 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4344 4345 cmd->num_qgrps = num_qgrps; 4346 4347 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4348 } 4349 4350 /** 4351 * ice_aq_dis_lan_txq 4352 * @hw: pointer to the hardware structure 4353 * @num_qgrps: number of groups in the list 4354 * @qg_list: the list of groups to disable 4355 * @buf_size: the total size of the qg_list buffer in bytes 4356 * @rst_src: if called due to reset, specifies the reset source 4357 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4358 * @cd: pointer to command details structure or NULL 4359 * 4360 * Disable LAN Tx queue (0x0C31) 4361 */ 4362 static int 4363 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4364 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4365 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4366 struct ice_sq_cd *cd) 4367 { 4368 struct ice_aqc_dis_txq_item *item; 4369 struct ice_aqc_dis_txqs *cmd; 4370 struct ice_aq_desc desc; 4371 u16 i, sz = 0; 4372 int status; 4373 4374 cmd = &desc.params.dis_txqs; 4375 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4376 4377 /* qg_list can be NULL only in VM/VF reset flow */ 4378 if (!qg_list && !rst_src) 4379 return -EINVAL; 4380 4381 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4382 return -EINVAL; 4383 4384 cmd->num_entries = num_qgrps; 4385 4386 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 4387 ICE_AQC_Q_DIS_TIMEOUT_M); 4388 4389 switch (rst_src) { 4390 case ICE_VM_RESET: 4391 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4392 cmd->vmvf_and_timeout |= 4393 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 4394 break; 4395 case ICE_VF_RESET: 4396 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4397 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4398 cmd->vmvf_and_timeout |= 4399 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 4400 ICE_AQC_Q_DIS_VMVF_NUM_M); 4401 break; 4402 case ICE_NO_RESET: 4403 default: 4404 break; 4405 } 4406 4407 /* flush pipe on time out */ 4408 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4409 /* If no queue group info, we are in a reset flow. Issue the AQ */ 4410 if (!qg_list) 4411 goto do_aq; 4412 4413 /* set RD bit to indicate that command buffer is provided by the driver 4414 * and it needs to be read by the firmware 4415 */ 4416 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4417 4418 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4419 u16 item_size = struct_size(item, q_id, item->num_qs); 4420 4421 /* If the num of queues is even, add 2 bytes of padding */ 4422 if ((item->num_qs % 2) == 0) 4423 item_size += 2; 4424 4425 sz += item_size; 4426 4427 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4428 } 4429 4430 if (buf_size != sz) 4431 return -EINVAL; 4432 4433 do_aq: 4434 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4435 if (status) { 4436 if (!qg_list) 4437 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4438 vmvf_num, hw->adminq.sq_last_status); 4439 else 4440 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4441 le16_to_cpu(qg_list[0].q_id[0]), 4442 hw->adminq.sq_last_status); 4443 } 4444 return status; 4445 } 4446 4447 /** 4448 * ice_aq_cfg_lan_txq 4449 * @hw: pointer to the hardware structure 4450 * @buf: buffer for command 4451 * @buf_size: size of buffer in bytes 4452 * @num_qs: number of queues being configured 4453 * @oldport: origination lport 4454 * @newport: destination lport 4455 * @cd: pointer to command details structure or NULL 4456 * 4457 * Move/Configure LAN Tx queue (0x0C32) 4458 * 4459 * There is a better AQ command to use for moving nodes, so only coding 4460 * this one for configuring the node. 4461 */ 4462 int 4463 ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf, 4464 u16 buf_size, u16 num_qs, u8 oldport, u8 newport, 4465 struct ice_sq_cd *cd) 4466 { 4467 struct ice_aqc_cfg_txqs *cmd; 4468 struct ice_aq_desc desc; 4469 int status; 4470 4471 cmd = &desc.params.cfg_txqs; 4472 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs); 4473 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4474 4475 if (!buf) 4476 return -EINVAL; 4477 4478 cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG; 4479 cmd->num_qs = num_qs; 4480 cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M); 4481 cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) & 4482 ICE_AQC_Q_CFG_DST_PRT_M; 4483 cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) & 4484 ICE_AQC_Q_CFG_TIMEOUT_M; 4485 cmd->blocked_cgds = 0; 4486 4487 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4488 if (status) 4489 ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n", 4490 hw->adminq.sq_last_status); 4491 return status; 4492 } 4493 4494 /** 4495 * ice_aq_add_rdma_qsets 4496 * @hw: pointer to the hardware structure 4497 * @num_qset_grps: Number of RDMA Qset groups 4498 * @qset_list: list of Qset groups to be added 4499 * @buf_size: size of buffer for indirect command 4500 * @cd: pointer to command details structure or NULL 4501 * 4502 * Add Tx RDMA Qsets (0x0C33) 4503 */ 4504 static int 4505 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4506 struct ice_aqc_add_rdma_qset_data *qset_list, 4507 u16 buf_size, struct ice_sq_cd *cd) 4508 { 4509 struct ice_aqc_add_rdma_qset_data *list; 4510 struct ice_aqc_add_rdma_qset *cmd; 4511 struct ice_aq_desc desc; 4512 u16 i, sum_size = 0; 4513 4514 cmd = &desc.params.add_rdma_qset; 4515 4516 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4517 4518 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4519 return -EINVAL; 4520 4521 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4522 u16 num_qsets = le16_to_cpu(list->num_qsets); 4523 4524 sum_size += struct_size(list, rdma_qsets, num_qsets); 4525 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4526 num_qsets); 4527 } 4528 4529 if (buf_size != sum_size) 4530 return -EINVAL; 4531 4532 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 4533 4534 cmd->num_qset_grps = num_qset_grps; 4535 4536 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4537 } 4538 4539 /* End of FW Admin Queue command wrappers */ 4540 4541 /** 4542 * ice_write_byte - write a byte to a packed context structure 4543 * @src_ctx: the context structure to read from 4544 * @dest_ctx: the context to be written to 4545 * @ce_info: a description of the struct to be filled 4546 */ 4547 static void 4548 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4549 { 4550 u8 src_byte, dest_byte, mask; 4551 u8 *from, *dest; 4552 u16 shift_width; 4553 4554 /* copy from the next struct field */ 4555 from = src_ctx + ce_info->offset; 4556 4557 /* prepare the bits and mask */ 4558 shift_width = ce_info->lsb % 8; 4559 mask = (u8)(BIT(ce_info->width) - 1); 4560 4561 src_byte = *from; 4562 src_byte &= mask; 4563 4564 /* shift to correct alignment */ 4565 mask <<= shift_width; 4566 src_byte <<= shift_width; 4567 4568 /* get the current bits from the target bit string */ 4569 dest = dest_ctx + (ce_info->lsb / 8); 4570 4571 memcpy(&dest_byte, dest, sizeof(dest_byte)); 4572 4573 dest_byte &= ~mask; /* get the bits not changing */ 4574 dest_byte |= src_byte; /* add in the new bits */ 4575 4576 /* put it all back */ 4577 memcpy(dest, &dest_byte, sizeof(dest_byte)); 4578 } 4579 4580 /** 4581 * ice_write_word - write a word to a packed context structure 4582 * @src_ctx: the context structure to read from 4583 * @dest_ctx: the context to be written to 4584 * @ce_info: a description of the struct to be filled 4585 */ 4586 static void 4587 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4588 { 4589 u16 src_word, mask; 4590 __le16 dest_word; 4591 u8 *from, *dest; 4592 u16 shift_width; 4593 4594 /* copy from the next struct field */ 4595 from = src_ctx + ce_info->offset; 4596 4597 /* prepare the bits and mask */ 4598 shift_width = ce_info->lsb % 8; 4599 mask = BIT(ce_info->width) - 1; 4600 4601 /* don't swizzle the bits until after the mask because the mask bits 4602 * will be in a different bit position on big endian machines 4603 */ 4604 src_word = *(u16 *)from; 4605 src_word &= mask; 4606 4607 /* shift to correct alignment */ 4608 mask <<= shift_width; 4609 src_word <<= shift_width; 4610 4611 /* get the current bits from the target bit string */ 4612 dest = dest_ctx + (ce_info->lsb / 8); 4613 4614 memcpy(&dest_word, dest, sizeof(dest_word)); 4615 4616 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 4617 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 4618 4619 /* put it all back */ 4620 memcpy(dest, &dest_word, sizeof(dest_word)); 4621 } 4622 4623 /** 4624 * ice_write_dword - write a dword to a packed context structure 4625 * @src_ctx: the context structure to read from 4626 * @dest_ctx: the context to be written to 4627 * @ce_info: a description of the struct to be filled 4628 */ 4629 static void 4630 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4631 { 4632 u32 src_dword, mask; 4633 __le32 dest_dword; 4634 u8 *from, *dest; 4635 u16 shift_width; 4636 4637 /* copy from the next struct field */ 4638 from = src_ctx + ce_info->offset; 4639 4640 /* prepare the bits and mask */ 4641 shift_width = ce_info->lsb % 8; 4642 4643 /* if the field width is exactly 32 on an x86 machine, then the shift 4644 * operation will not work because the SHL instructions count is masked 4645 * to 5 bits so the shift will do nothing 4646 */ 4647 if (ce_info->width < 32) 4648 mask = BIT(ce_info->width) - 1; 4649 else 4650 mask = (u32)~0; 4651 4652 /* don't swizzle the bits until after the mask because the mask bits 4653 * will be in a different bit position on big endian machines 4654 */ 4655 src_dword = *(u32 *)from; 4656 src_dword &= mask; 4657 4658 /* shift to correct alignment */ 4659 mask <<= shift_width; 4660 src_dword <<= shift_width; 4661 4662 /* get the current bits from the target bit string */ 4663 dest = dest_ctx + (ce_info->lsb / 8); 4664 4665 memcpy(&dest_dword, dest, sizeof(dest_dword)); 4666 4667 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4668 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4669 4670 /* put it all back */ 4671 memcpy(dest, &dest_dword, sizeof(dest_dword)); 4672 } 4673 4674 /** 4675 * ice_write_qword - write a qword to a packed context structure 4676 * @src_ctx: the context structure to read from 4677 * @dest_ctx: the context to be written to 4678 * @ce_info: a description of the struct to be filled 4679 */ 4680 static void 4681 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4682 { 4683 u64 src_qword, mask; 4684 __le64 dest_qword; 4685 u8 *from, *dest; 4686 u16 shift_width; 4687 4688 /* copy from the next struct field */ 4689 from = src_ctx + ce_info->offset; 4690 4691 /* prepare the bits and mask */ 4692 shift_width = ce_info->lsb % 8; 4693 4694 /* if the field width is exactly 64 on an x86 machine, then the shift 4695 * operation will not work because the SHL instructions count is masked 4696 * to 6 bits so the shift will do nothing 4697 */ 4698 if (ce_info->width < 64) 4699 mask = BIT_ULL(ce_info->width) - 1; 4700 else 4701 mask = (u64)~0; 4702 4703 /* don't swizzle the bits until after the mask because the mask bits 4704 * will be in a different bit position on big endian machines 4705 */ 4706 src_qword = *(u64 *)from; 4707 src_qword &= mask; 4708 4709 /* shift to correct alignment */ 4710 mask <<= shift_width; 4711 src_qword <<= shift_width; 4712 4713 /* get the current bits from the target bit string */ 4714 dest = dest_ctx + (ce_info->lsb / 8); 4715 4716 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4717 4718 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4719 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4720 4721 /* put it all back */ 4722 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4723 } 4724 4725 /** 4726 * ice_set_ctx - set context bits in packed structure 4727 * @hw: pointer to the hardware structure 4728 * @src_ctx: pointer to a generic non-packed context structure 4729 * @dest_ctx: pointer to memory for the packed structure 4730 * @ce_info: a description of the structure to be transformed 4731 */ 4732 int 4733 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4734 const struct ice_ctx_ele *ce_info) 4735 { 4736 int f; 4737 4738 for (f = 0; ce_info[f].width; f++) { 4739 /* We have to deal with each element of the FW response 4740 * using the correct size so that we are correct regardless 4741 * of the endianness of the machine. 4742 */ 4743 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4744 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", 4745 f, ce_info[f].width, ce_info[f].size_of); 4746 continue; 4747 } 4748 switch (ce_info[f].size_of) { 4749 case sizeof(u8): 4750 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4751 break; 4752 case sizeof(u16): 4753 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4754 break; 4755 case sizeof(u32): 4756 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4757 break; 4758 case sizeof(u64): 4759 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4760 break; 4761 default: 4762 return -EINVAL; 4763 } 4764 } 4765 4766 return 0; 4767 } 4768 4769 /** 4770 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4771 * @hw: pointer to the HW struct 4772 * @vsi_handle: software VSI handle 4773 * @tc: TC number 4774 * @q_handle: software queue handle 4775 */ 4776 struct ice_q_ctx * 4777 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4778 { 4779 struct ice_vsi_ctx *vsi; 4780 struct ice_q_ctx *q_ctx; 4781 4782 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4783 if (!vsi) 4784 return NULL; 4785 if (q_handle >= vsi->num_lan_q_entries[tc]) 4786 return NULL; 4787 if (!vsi->lan_q_ctx[tc]) 4788 return NULL; 4789 q_ctx = vsi->lan_q_ctx[tc]; 4790 return &q_ctx[q_handle]; 4791 } 4792 4793 /** 4794 * ice_ena_vsi_txq 4795 * @pi: port information structure 4796 * @vsi_handle: software VSI handle 4797 * @tc: TC number 4798 * @q_handle: software queue handle 4799 * @num_qgrps: Number of added queue groups 4800 * @buf: list of queue groups to be added 4801 * @buf_size: size of buffer for indirect command 4802 * @cd: pointer to command details structure or NULL 4803 * 4804 * This function adds one LAN queue 4805 */ 4806 int 4807 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4808 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4809 struct ice_sq_cd *cd) 4810 { 4811 struct ice_aqc_txsched_elem_data node = { 0 }; 4812 struct ice_sched_node *parent; 4813 struct ice_q_ctx *q_ctx; 4814 struct ice_hw *hw; 4815 int status; 4816 4817 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4818 return -EIO; 4819 4820 if (num_qgrps > 1 || buf->num_txqs > 1) 4821 return -ENOSPC; 4822 4823 hw = pi->hw; 4824 4825 if (!ice_is_vsi_valid(hw, vsi_handle)) 4826 return -EINVAL; 4827 4828 mutex_lock(&pi->sched_lock); 4829 4830 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4831 if (!q_ctx) { 4832 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4833 q_handle); 4834 status = -EINVAL; 4835 goto ena_txq_exit; 4836 } 4837 4838 /* find a parent node */ 4839 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4840 ICE_SCHED_NODE_OWNER_LAN); 4841 if (!parent) { 4842 status = -EINVAL; 4843 goto ena_txq_exit; 4844 } 4845 4846 buf->parent_teid = parent->info.node_teid; 4847 node.parent_teid = parent->info.node_teid; 4848 /* Mark that the values in the "generic" section as valid. The default 4849 * value in the "generic" section is zero. This means that : 4850 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4851 * - 0 priority among siblings, indicated by Bit 1-3. 4852 * - WFQ, indicated by Bit 4. 4853 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4854 * Bit 5-6. 4855 * - Bit 7 is reserved. 4856 * Without setting the generic section as valid in valid_sections, the 4857 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 4858 */ 4859 buf->txqs[0].info.valid_sections = 4860 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4861 ICE_AQC_ELEM_VALID_EIR; 4862 buf->txqs[0].info.generic = 0; 4863 buf->txqs[0].info.cir_bw.bw_profile_idx = 4864 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4865 buf->txqs[0].info.cir_bw.bw_alloc = 4866 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4867 buf->txqs[0].info.eir_bw.bw_profile_idx = 4868 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4869 buf->txqs[0].info.eir_bw.bw_alloc = 4870 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4871 4872 /* add the LAN queue */ 4873 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4874 if (status) { 4875 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4876 le16_to_cpu(buf->txqs[0].txq_id), 4877 hw->adminq.sq_last_status); 4878 goto ena_txq_exit; 4879 } 4880 4881 node.node_teid = buf->txqs[0].q_teid; 4882 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4883 q_ctx->q_handle = q_handle; 4884 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4885 4886 /* add a leaf node into scheduler tree queue layer */ 4887 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL); 4888 if (!status) 4889 status = ice_sched_replay_q_bw(pi, q_ctx); 4890 4891 ena_txq_exit: 4892 mutex_unlock(&pi->sched_lock); 4893 return status; 4894 } 4895 4896 /** 4897 * ice_dis_vsi_txq 4898 * @pi: port information structure 4899 * @vsi_handle: software VSI handle 4900 * @tc: TC number 4901 * @num_queues: number of queues 4902 * @q_handles: pointer to software queue handle array 4903 * @q_ids: pointer to the q_id array 4904 * @q_teids: pointer to queue node teids 4905 * @rst_src: if called due to reset, specifies the reset source 4906 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4907 * @cd: pointer to command details structure or NULL 4908 * 4909 * This function removes queues and their corresponding nodes in SW DB 4910 */ 4911 int 4912 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4913 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4914 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4915 struct ice_sq_cd *cd) 4916 { 4917 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 4918 u16 i, buf_size = __struct_size(qg_list); 4919 struct ice_q_ctx *q_ctx; 4920 int status = -ENOENT; 4921 struct ice_hw *hw; 4922 4923 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4924 return -EIO; 4925 4926 hw = pi->hw; 4927 4928 if (!num_queues) { 4929 /* if queue is disabled already yet the disable queue command 4930 * has to be sent to complete the VF reset, then call 4931 * ice_aq_dis_lan_txq without any queue information 4932 */ 4933 if (rst_src) 4934 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4935 vmvf_num, NULL); 4936 return -EIO; 4937 } 4938 4939 mutex_lock(&pi->sched_lock); 4940 4941 for (i = 0; i < num_queues; i++) { 4942 struct ice_sched_node *node; 4943 4944 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4945 if (!node) 4946 continue; 4947 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4948 if (!q_ctx) { 4949 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", 4950 q_handles[i]); 4951 continue; 4952 } 4953 if (q_ctx->q_handle != q_handles[i]) { 4954 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", 4955 q_ctx->q_handle, q_handles[i]); 4956 continue; 4957 } 4958 qg_list->parent_teid = node->info.parent_teid; 4959 qg_list->num_qs = 1; 4960 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4961 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4962 vmvf_num, cd); 4963 4964 if (status) 4965 break; 4966 ice_free_sched_node(pi, node); 4967 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4968 q_ctx->q_teid = ICE_INVAL_TEID; 4969 } 4970 mutex_unlock(&pi->sched_lock); 4971 return status; 4972 } 4973 4974 /** 4975 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4976 * @pi: port information structure 4977 * @vsi_handle: software VSI handle 4978 * @tc_bitmap: TC bitmap 4979 * @maxqs: max queues array per TC 4980 * @owner: LAN or RDMA 4981 * 4982 * This function adds/updates the VSI queues per TC. 4983 */ 4984 static int 4985 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4986 u16 *maxqs, u8 owner) 4987 { 4988 int status = 0; 4989 u8 i; 4990 4991 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4992 return -EIO; 4993 4994 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4995 return -EINVAL; 4996 4997 mutex_lock(&pi->sched_lock); 4998 4999 ice_for_each_traffic_class(i) { 5000 /* configuration is possible only if TC node is present */ 5001 if (!ice_sched_get_tc_node(pi, i)) 5002 continue; 5003 5004 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 5005 ice_is_tc_ena(tc_bitmap, i)); 5006 if (status) 5007 break; 5008 } 5009 5010 mutex_unlock(&pi->sched_lock); 5011 return status; 5012 } 5013 5014 /** 5015 * ice_cfg_vsi_lan - configure VSI LAN queues 5016 * @pi: port information structure 5017 * @vsi_handle: software VSI handle 5018 * @tc_bitmap: TC bitmap 5019 * @max_lanqs: max LAN queues array per TC 5020 * 5021 * This function adds/updates the VSI LAN queues per TC. 5022 */ 5023 int 5024 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 5025 u16 *max_lanqs) 5026 { 5027 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 5028 ICE_SCHED_NODE_OWNER_LAN); 5029 } 5030 5031 /** 5032 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 5033 * @pi: port information structure 5034 * @vsi_handle: software VSI handle 5035 * @tc_bitmap: TC bitmap 5036 * @max_rdmaqs: max RDMA queues array per TC 5037 * 5038 * This function adds/updates the VSI RDMA queues per TC. 5039 */ 5040 int 5041 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 5042 u16 *max_rdmaqs) 5043 { 5044 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs, 5045 ICE_SCHED_NODE_OWNER_RDMA); 5046 } 5047 5048 /** 5049 * ice_ena_vsi_rdma_qset 5050 * @pi: port information structure 5051 * @vsi_handle: software VSI handle 5052 * @tc: TC number 5053 * @rdma_qset: pointer to RDMA Qset 5054 * @num_qsets: number of RDMA Qsets 5055 * @qset_teid: pointer to Qset node TEIDs 5056 * 5057 * This function adds RDMA Qset 5058 */ 5059 int 5060 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 5061 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 5062 { 5063 struct ice_aqc_txsched_elem_data node = { 0 }; 5064 struct ice_aqc_add_rdma_qset_data *buf; 5065 struct ice_sched_node *parent; 5066 struct ice_hw *hw; 5067 u16 i, buf_size; 5068 int ret; 5069 5070 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 5071 return -EIO; 5072 hw = pi->hw; 5073 5074 if (!ice_is_vsi_valid(hw, vsi_handle)) 5075 return -EINVAL; 5076 5077 buf_size = struct_size(buf, rdma_qsets, num_qsets); 5078 buf = kzalloc(buf_size, GFP_KERNEL); 5079 if (!buf) 5080 return -ENOMEM; 5081 mutex_lock(&pi->sched_lock); 5082 5083 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 5084 ICE_SCHED_NODE_OWNER_RDMA); 5085 if (!parent) { 5086 ret = -EINVAL; 5087 goto rdma_error_exit; 5088 } 5089 buf->parent_teid = parent->info.node_teid; 5090 node.parent_teid = parent->info.node_teid; 5091 5092 buf->num_qsets = cpu_to_le16(num_qsets); 5093 for (i = 0; i < num_qsets; i++) { 5094 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 5095 buf->rdma_qsets[i].info.valid_sections = 5096 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 5097 ICE_AQC_ELEM_VALID_EIR; 5098 buf->rdma_qsets[i].info.generic = 0; 5099 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 5100 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 5101 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 5102 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 5103 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 5104 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 5105 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 5106 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 5107 } 5108 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 5109 if (ret) { 5110 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 5111 goto rdma_error_exit; 5112 } 5113 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 5114 for (i = 0; i < num_qsets; i++) { 5115 node.node_teid = buf->rdma_qsets[i].qset_teid; 5116 ret = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 5117 &node, NULL); 5118 if (ret) 5119 break; 5120 qset_teid[i] = le32_to_cpu(node.node_teid); 5121 } 5122 rdma_error_exit: 5123 mutex_unlock(&pi->sched_lock); 5124 kfree(buf); 5125 return ret; 5126 } 5127 5128 /** 5129 * ice_dis_vsi_rdma_qset - free RDMA resources 5130 * @pi: port_info struct 5131 * @count: number of RDMA Qsets to free 5132 * @qset_teid: TEID of Qset node 5133 * @q_id: list of queue IDs being disabled 5134 */ 5135 int 5136 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 5137 u16 *q_id) 5138 { 5139 DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1); 5140 u16 qg_size = __struct_size(qg_list); 5141 struct ice_hw *hw; 5142 int status = 0; 5143 int i; 5144 5145 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 5146 return -EIO; 5147 5148 hw = pi->hw; 5149 5150 mutex_lock(&pi->sched_lock); 5151 5152 for (i = 0; i < count; i++) { 5153 struct ice_sched_node *node; 5154 5155 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 5156 if (!node) 5157 continue; 5158 5159 qg_list->parent_teid = node->info.parent_teid; 5160 qg_list->num_qs = 1; 5161 qg_list->q_id[0] = 5162 cpu_to_le16(q_id[i] | 5163 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 5164 5165 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 5166 ICE_NO_RESET, 0, NULL); 5167 if (status) 5168 break; 5169 5170 ice_free_sched_node(pi, node); 5171 } 5172 5173 mutex_unlock(&pi->sched_lock); 5174 return status; 5175 } 5176 5177 /** 5178 * ice_aq_get_cgu_abilities - get cgu abilities 5179 * @hw: pointer to the HW struct 5180 * @abilities: CGU abilities 5181 * 5182 * Get CGU abilities (0x0C61) 5183 * Return: 0 on success or negative value on failure. 5184 */ 5185 int 5186 ice_aq_get_cgu_abilities(struct ice_hw *hw, 5187 struct ice_aqc_get_cgu_abilities *abilities) 5188 { 5189 struct ice_aq_desc desc; 5190 5191 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities); 5192 return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL); 5193 } 5194 5195 /** 5196 * ice_aq_set_input_pin_cfg - set input pin config 5197 * @hw: pointer to the HW struct 5198 * @input_idx: Input index 5199 * @flags1: Input flags 5200 * @flags2: Input flags 5201 * @freq: Frequency in Hz 5202 * @phase_delay: Delay in ps 5203 * 5204 * Set CGU input config (0x0C62) 5205 * Return: 0 on success or negative value on failure. 5206 */ 5207 int 5208 ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2, 5209 u32 freq, s32 phase_delay) 5210 { 5211 struct ice_aqc_set_cgu_input_config *cmd; 5212 struct ice_aq_desc desc; 5213 5214 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config); 5215 cmd = &desc.params.set_cgu_input_config; 5216 cmd->input_idx = input_idx; 5217 cmd->flags1 = flags1; 5218 cmd->flags2 = flags2; 5219 cmd->freq = cpu_to_le32(freq); 5220 cmd->phase_delay = cpu_to_le32(phase_delay); 5221 5222 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5223 } 5224 5225 /** 5226 * ice_aq_get_input_pin_cfg - get input pin config 5227 * @hw: pointer to the HW struct 5228 * @input_idx: Input index 5229 * @status: Pin status 5230 * @type: Pin type 5231 * @flags1: Input flags 5232 * @flags2: Input flags 5233 * @freq: Frequency in Hz 5234 * @phase_delay: Delay in ps 5235 * 5236 * Get CGU input config (0x0C63) 5237 * Return: 0 on success or negative value on failure. 5238 */ 5239 int 5240 ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type, 5241 u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay) 5242 { 5243 struct ice_aqc_get_cgu_input_config *cmd; 5244 struct ice_aq_desc desc; 5245 int ret; 5246 5247 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config); 5248 cmd = &desc.params.get_cgu_input_config; 5249 cmd->input_idx = input_idx; 5250 5251 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5252 if (!ret) { 5253 if (status) 5254 *status = cmd->status; 5255 if (type) 5256 *type = cmd->type; 5257 if (flags1) 5258 *flags1 = cmd->flags1; 5259 if (flags2) 5260 *flags2 = cmd->flags2; 5261 if (freq) 5262 *freq = le32_to_cpu(cmd->freq); 5263 if (phase_delay) 5264 *phase_delay = le32_to_cpu(cmd->phase_delay); 5265 } 5266 5267 return ret; 5268 } 5269 5270 /** 5271 * ice_aq_set_output_pin_cfg - set output pin config 5272 * @hw: pointer to the HW struct 5273 * @output_idx: Output index 5274 * @flags: Output flags 5275 * @src_sel: Index of DPLL block 5276 * @freq: Output frequency 5277 * @phase_delay: Output phase compensation 5278 * 5279 * Set CGU output config (0x0C64) 5280 * Return: 0 on success or negative value on failure. 5281 */ 5282 int 5283 ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags, 5284 u8 src_sel, u32 freq, s32 phase_delay) 5285 { 5286 struct ice_aqc_set_cgu_output_config *cmd; 5287 struct ice_aq_desc desc; 5288 5289 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config); 5290 cmd = &desc.params.set_cgu_output_config; 5291 cmd->output_idx = output_idx; 5292 cmd->flags = flags; 5293 cmd->src_sel = src_sel; 5294 cmd->freq = cpu_to_le32(freq); 5295 cmd->phase_delay = cpu_to_le32(phase_delay); 5296 5297 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5298 } 5299 5300 /** 5301 * ice_aq_get_output_pin_cfg - get output pin config 5302 * @hw: pointer to the HW struct 5303 * @output_idx: Output index 5304 * @flags: Output flags 5305 * @src_sel: Internal DPLL source 5306 * @freq: Output frequency 5307 * @src_freq: Source frequency 5308 * 5309 * Get CGU output config (0x0C65) 5310 * Return: 0 on success or negative value on failure. 5311 */ 5312 int 5313 ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags, 5314 u8 *src_sel, u32 *freq, u32 *src_freq) 5315 { 5316 struct ice_aqc_get_cgu_output_config *cmd; 5317 struct ice_aq_desc desc; 5318 int ret; 5319 5320 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config); 5321 cmd = &desc.params.get_cgu_output_config; 5322 cmd->output_idx = output_idx; 5323 5324 ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5325 if (!ret) { 5326 if (flags) 5327 *flags = cmd->flags; 5328 if (src_sel) 5329 *src_sel = cmd->src_sel; 5330 if (freq) 5331 *freq = le32_to_cpu(cmd->freq); 5332 if (src_freq) 5333 *src_freq = le32_to_cpu(cmd->src_freq); 5334 } 5335 5336 return ret; 5337 } 5338 5339 /** 5340 * ice_aq_get_cgu_dpll_status - get dpll status 5341 * @hw: pointer to the HW struct 5342 * @dpll_num: DPLL index 5343 * @ref_state: Reference clock state 5344 * @config: current DPLL config 5345 * @dpll_state: current DPLL state 5346 * @phase_offset: Phase offset in ns 5347 * @eec_mode: EEC_mode 5348 * 5349 * Get CGU DPLL status (0x0C66) 5350 * Return: 0 on success or negative value on failure. 5351 */ 5352 int 5353 ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state, 5354 u8 *dpll_state, u8 *config, s64 *phase_offset, 5355 u8 *eec_mode) 5356 { 5357 struct ice_aqc_get_cgu_dpll_status *cmd; 5358 const s64 nsec_per_psec = 1000LL; 5359 struct ice_aq_desc desc; 5360 int status; 5361 5362 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status); 5363 cmd = &desc.params.get_cgu_dpll_status; 5364 cmd->dpll_num = dpll_num; 5365 5366 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5367 if (!status) { 5368 *ref_state = cmd->ref_state; 5369 *dpll_state = cmd->dpll_state; 5370 *config = cmd->config; 5371 *phase_offset = le32_to_cpu(cmd->phase_offset_h); 5372 *phase_offset <<= 32; 5373 *phase_offset += le32_to_cpu(cmd->phase_offset_l); 5374 *phase_offset = div64_s64(sign_extend64(*phase_offset, 47), 5375 nsec_per_psec); 5376 *eec_mode = cmd->eec_mode; 5377 } 5378 5379 return status; 5380 } 5381 5382 /** 5383 * ice_aq_set_cgu_dpll_config - set dpll config 5384 * @hw: pointer to the HW struct 5385 * @dpll_num: DPLL index 5386 * @ref_state: Reference clock state 5387 * @config: DPLL config 5388 * @eec_mode: EEC mode 5389 * 5390 * Set CGU DPLL config (0x0C67) 5391 * Return: 0 on success or negative value on failure. 5392 */ 5393 int 5394 ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state, 5395 u8 config, u8 eec_mode) 5396 { 5397 struct ice_aqc_set_cgu_dpll_config *cmd; 5398 struct ice_aq_desc desc; 5399 5400 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config); 5401 cmd = &desc.params.set_cgu_dpll_config; 5402 cmd->dpll_num = dpll_num; 5403 cmd->ref_state = ref_state; 5404 cmd->config = config; 5405 cmd->eec_mode = eec_mode; 5406 5407 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5408 } 5409 5410 /** 5411 * ice_aq_set_cgu_ref_prio - set input reference priority 5412 * @hw: pointer to the HW struct 5413 * @dpll_num: DPLL index 5414 * @ref_idx: Reference pin index 5415 * @ref_priority: Reference input priority 5416 * 5417 * Set CGU reference priority (0x0C68) 5418 * Return: 0 on success or negative value on failure. 5419 */ 5420 int 5421 ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5422 u8 ref_priority) 5423 { 5424 struct ice_aqc_set_cgu_ref_prio *cmd; 5425 struct ice_aq_desc desc; 5426 5427 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio); 5428 cmd = &desc.params.set_cgu_ref_prio; 5429 cmd->dpll_num = dpll_num; 5430 cmd->ref_idx = ref_idx; 5431 cmd->ref_priority = ref_priority; 5432 5433 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5434 } 5435 5436 /** 5437 * ice_aq_get_cgu_ref_prio - get input reference priority 5438 * @hw: pointer to the HW struct 5439 * @dpll_num: DPLL index 5440 * @ref_idx: Reference pin index 5441 * @ref_prio: Reference input priority 5442 * 5443 * Get CGU reference priority (0x0C69) 5444 * Return: 0 on success or negative value on failure. 5445 */ 5446 int 5447 ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx, 5448 u8 *ref_prio) 5449 { 5450 struct ice_aqc_get_cgu_ref_prio *cmd; 5451 struct ice_aq_desc desc; 5452 int status; 5453 5454 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio); 5455 cmd = &desc.params.get_cgu_ref_prio; 5456 cmd->dpll_num = dpll_num; 5457 cmd->ref_idx = ref_idx; 5458 5459 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5460 if (!status) 5461 *ref_prio = cmd->ref_priority; 5462 5463 return status; 5464 } 5465 5466 /** 5467 * ice_aq_get_cgu_info - get cgu info 5468 * @hw: pointer to the HW struct 5469 * @cgu_id: CGU ID 5470 * @cgu_cfg_ver: CGU config version 5471 * @cgu_fw_ver: CGU firmware version 5472 * 5473 * Get CGU info (0x0C6A) 5474 * Return: 0 on success or negative value on failure. 5475 */ 5476 int 5477 ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver, 5478 u32 *cgu_fw_ver) 5479 { 5480 struct ice_aqc_get_cgu_info *cmd; 5481 struct ice_aq_desc desc; 5482 int status; 5483 5484 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info); 5485 cmd = &desc.params.get_cgu_info; 5486 5487 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5488 if (!status) { 5489 *cgu_id = le32_to_cpu(cmd->cgu_id); 5490 *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver); 5491 *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver); 5492 } 5493 5494 return status; 5495 } 5496 5497 /** 5498 * ice_aq_set_phy_rec_clk_out - set RCLK phy out 5499 * @hw: pointer to the HW struct 5500 * @phy_output: PHY reference clock output pin 5501 * @enable: GPIO state to be applied 5502 * @freq: PHY output frequency 5503 * 5504 * Set phy recovered clock as reference (0x0630) 5505 * Return: 0 on success or negative value on failure. 5506 */ 5507 int 5508 ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable, 5509 u32 *freq) 5510 { 5511 struct ice_aqc_set_phy_rec_clk_out *cmd; 5512 struct ice_aq_desc desc; 5513 int status; 5514 5515 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out); 5516 cmd = &desc.params.set_phy_rec_clk_out; 5517 cmd->phy_output = phy_output; 5518 cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT; 5519 cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN; 5520 cmd->freq = cpu_to_le32(*freq); 5521 5522 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5523 if (!status) 5524 *freq = le32_to_cpu(cmd->freq); 5525 5526 return status; 5527 } 5528 5529 /** 5530 * ice_aq_get_phy_rec_clk_out - get phy recovered signal info 5531 * @hw: pointer to the HW struct 5532 * @phy_output: PHY reference clock output pin 5533 * @port_num: Port number 5534 * @flags: PHY flags 5535 * @node_handle: PHY output frequency 5536 * 5537 * Get PHY recovered clock output info (0x0631) 5538 * Return: 0 on success or negative value on failure. 5539 */ 5540 int 5541 ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num, 5542 u8 *flags, u16 *node_handle) 5543 { 5544 struct ice_aqc_get_phy_rec_clk_out *cmd; 5545 struct ice_aq_desc desc; 5546 int status; 5547 5548 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out); 5549 cmd = &desc.params.get_phy_rec_clk_out; 5550 cmd->phy_output = *phy_output; 5551 5552 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5553 if (!status) { 5554 *phy_output = cmd->phy_output; 5555 if (port_num) 5556 *port_num = cmd->port_num; 5557 if (flags) 5558 *flags = cmd->flags; 5559 if (node_handle) 5560 *node_handle = le16_to_cpu(cmd->node_handle); 5561 } 5562 5563 return status; 5564 } 5565 5566 /** 5567 * ice_aq_get_sensor_reading 5568 * @hw: pointer to the HW struct 5569 * @data: pointer to data to be read from the sensor 5570 * 5571 * Get sensor reading (0x0632) 5572 */ 5573 int ice_aq_get_sensor_reading(struct ice_hw *hw, 5574 struct ice_aqc_get_sensor_reading_resp *data) 5575 { 5576 struct ice_aqc_get_sensor_reading *cmd; 5577 struct ice_aq_desc desc; 5578 int status; 5579 5580 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading); 5581 cmd = &desc.params.get_sensor_reading; 5582 #define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0 5583 #define ICE_INTERNAL_TEMP_SENSOR 0 5584 cmd->sensor = ICE_INTERNAL_TEMP_SENSOR; 5585 cmd->format = ICE_INTERNAL_TEMP_SENSOR_FORMAT; 5586 5587 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 5588 if (!status) 5589 memcpy(data, &desc.params.get_sensor_reading_resp, 5590 sizeof(*data)); 5591 5592 return status; 5593 } 5594 5595 /** 5596 * ice_replay_pre_init - replay pre initialization 5597 * @hw: pointer to the HW struct 5598 * 5599 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 5600 */ 5601 static int ice_replay_pre_init(struct ice_hw *hw) 5602 { 5603 struct ice_switch_info *sw = hw->switch_info; 5604 u8 i; 5605 5606 /* Delete old entries from replay filter list head if there is any */ 5607 ice_rm_all_sw_replay_rule_info(hw); 5608 /* In start of replay, move entries into replay_rules list, it 5609 * will allow adding rules entries back to filt_rules list, 5610 * which is operational list. 5611 */ 5612 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) 5613 list_replace_init(&sw->recp_list[i].filt_rules, 5614 &sw->recp_list[i].filt_replay_rules); 5615 ice_sched_replay_agg_vsi_preinit(hw); 5616 5617 return 0; 5618 } 5619 5620 /** 5621 * ice_replay_vsi - replay VSI configuration 5622 * @hw: pointer to the HW struct 5623 * @vsi_handle: driver VSI handle 5624 * 5625 * Restore all VSI configuration after reset. It is required to call this 5626 * function with main VSI first. 5627 */ 5628 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 5629 { 5630 int status; 5631 5632 if (!ice_is_vsi_valid(hw, vsi_handle)) 5633 return -EINVAL; 5634 5635 /* Replay pre-initialization if there is any */ 5636 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 5637 status = ice_replay_pre_init(hw); 5638 if (status) 5639 return status; 5640 } 5641 /* Replay per VSI all RSS configurations */ 5642 status = ice_replay_rss_cfg(hw, vsi_handle); 5643 if (status) 5644 return status; 5645 /* Replay per VSI all filters */ 5646 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 5647 if (!status) 5648 status = ice_replay_vsi_agg(hw, vsi_handle); 5649 return status; 5650 } 5651 5652 /** 5653 * ice_replay_post - post replay configuration cleanup 5654 * @hw: pointer to the HW struct 5655 * 5656 * Post replay cleanup. 5657 */ 5658 void ice_replay_post(struct ice_hw *hw) 5659 { 5660 /* Delete old entries from replay filter list head */ 5661 ice_rm_all_sw_replay_rule_info(hw); 5662 ice_sched_replay_agg(hw); 5663 } 5664 5665 /** 5666 * ice_stat_update40 - read 40 bit stat from the chip and update stat values 5667 * @hw: ptr to the hardware info 5668 * @reg: offset of 64 bit HW register to read from 5669 * @prev_stat_loaded: bool to specify if previous stats are loaded 5670 * @prev_stat: ptr to previous loaded stat value 5671 * @cur_stat: ptr to current stat value 5672 */ 5673 void 5674 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 5675 u64 *prev_stat, u64 *cur_stat) 5676 { 5677 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); 5678 5679 /* device stats are not reset at PFR, they likely will not be zeroed 5680 * when the driver starts. Thus, save the value from the first read 5681 * without adding to the statistic value so that we report stats which 5682 * count up from zero. 5683 */ 5684 if (!prev_stat_loaded) { 5685 *prev_stat = new_data; 5686 return; 5687 } 5688 5689 /* Calculate the difference between the new and old values, and then 5690 * add it to the software stat value. 5691 */ 5692 if (new_data >= *prev_stat) 5693 *cur_stat += new_data - *prev_stat; 5694 else 5695 /* to manage the potential roll-over */ 5696 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; 5697 5698 /* Update the previously stored value to prepare for next read */ 5699 *prev_stat = new_data; 5700 } 5701 5702 /** 5703 * ice_stat_update32 - read 32 bit stat from the chip and update stat values 5704 * @hw: ptr to the hardware info 5705 * @reg: offset of HW register to read from 5706 * @prev_stat_loaded: bool to specify if previous stats are loaded 5707 * @prev_stat: ptr to previous loaded stat value 5708 * @cur_stat: ptr to current stat value 5709 */ 5710 void 5711 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 5712 u64 *prev_stat, u64 *cur_stat) 5713 { 5714 u32 new_data; 5715 5716 new_data = rd32(hw, reg); 5717 5718 /* device stats are not reset at PFR, they likely will not be zeroed 5719 * when the driver starts. Thus, save the value from the first read 5720 * without adding to the statistic value so that we report stats which 5721 * count up from zero. 5722 */ 5723 if (!prev_stat_loaded) { 5724 *prev_stat = new_data; 5725 return; 5726 } 5727 5728 /* Calculate the difference between the new and old values, and then 5729 * add it to the software stat value. 5730 */ 5731 if (new_data >= *prev_stat) 5732 *cur_stat += new_data - *prev_stat; 5733 else 5734 /* to manage the potential roll-over */ 5735 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; 5736 5737 /* Update the previously stored value to prepare for next read */ 5738 *prev_stat = new_data; 5739 } 5740 5741 /** 5742 * ice_sched_query_elem - query element information from HW 5743 * @hw: pointer to the HW struct 5744 * @node_teid: node TEID to be queried 5745 * @buf: buffer to element information 5746 * 5747 * This function queries HW element information 5748 */ 5749 int 5750 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 5751 struct ice_aqc_txsched_elem_data *buf) 5752 { 5753 u16 buf_size, num_elem_ret = 0; 5754 int status; 5755 5756 buf_size = sizeof(*buf); 5757 memset(buf, 0, buf_size); 5758 buf->node_teid = cpu_to_le32(node_teid); 5759 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, 5760 NULL); 5761 if (status || num_elem_ret != 1) 5762 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 5763 return status; 5764 } 5765 5766 /** 5767 * ice_aq_read_i2c 5768 * @hw: pointer to the hw struct 5769 * @topo_addr: topology address for a device to communicate with 5770 * @bus_addr: 7-bit I2C bus address 5771 * @addr: I2C memory address (I2C offset) with up to 16 bits 5772 * @params: I2C parameters: bit [7] - Repeated start, 5773 * bits [6:5] data offset size, 5774 * bit [4] - I2C address type, 5775 * bits [3:0] - data size to read (0-16 bytes) 5776 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device 5777 * @cd: pointer to command details structure or NULL 5778 * 5779 * Read I2C (0x06E2) 5780 */ 5781 int 5782 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5783 u16 bus_addr, __le16 addr, u8 params, u8 *data, 5784 struct ice_sq_cd *cd) 5785 { 5786 struct ice_aq_desc desc = { 0 }; 5787 struct ice_aqc_i2c *cmd; 5788 u8 data_size; 5789 int status; 5790 5791 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c); 5792 cmd = &desc.params.read_write_i2c; 5793 5794 if (!data) 5795 return -EINVAL; 5796 5797 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5798 5799 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5800 cmd->topo_addr = topo_addr; 5801 cmd->i2c_params = params; 5802 cmd->i2c_addr = addr; 5803 5804 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5805 if (!status) { 5806 struct ice_aqc_read_i2c_resp *resp; 5807 u8 i; 5808 5809 resp = &desc.params.read_i2c_resp; 5810 for (i = 0; i < data_size; i++) { 5811 *data = resp->i2c_data[i]; 5812 data++; 5813 } 5814 } 5815 5816 return status; 5817 } 5818 5819 /** 5820 * ice_aq_write_i2c 5821 * @hw: pointer to the hw struct 5822 * @topo_addr: topology address for a device to communicate with 5823 * @bus_addr: 7-bit I2C bus address 5824 * @addr: I2C memory address (I2C offset) with up to 16 bits 5825 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes) 5826 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device 5827 * @cd: pointer to command details structure or NULL 5828 * 5829 * Write I2C (0x06E3) 5830 * 5831 * * Return: 5832 * * 0 - Successful write to the i2c device 5833 * * -EINVAL - Data size greater than 4 bytes 5834 * * -EIO - FW error 5835 */ 5836 int 5837 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, 5838 u16 bus_addr, __le16 addr, u8 params, const u8 *data, 5839 struct ice_sq_cd *cd) 5840 { 5841 struct ice_aq_desc desc = { 0 }; 5842 struct ice_aqc_i2c *cmd; 5843 u8 data_size; 5844 5845 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c); 5846 cmd = &desc.params.read_write_i2c; 5847 5848 data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params); 5849 5850 /* data_size limited to 4 */ 5851 if (data_size > 4) 5852 return -EINVAL; 5853 5854 cmd->i2c_bus_addr = cpu_to_le16(bus_addr); 5855 cmd->topo_addr = topo_addr; 5856 cmd->i2c_params = params; 5857 cmd->i2c_addr = addr; 5858 5859 memcpy(cmd->i2c_data, data, data_size); 5860 5861 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5862 } 5863 5864 /** 5865 * ice_aq_set_gpio 5866 * @hw: pointer to the hw struct 5867 * @gpio_ctrl_handle: GPIO controller node handle 5868 * @pin_idx: IO Number of the GPIO that needs to be set 5869 * @value: SW provide IO value to set in the LSB 5870 * @cd: pointer to command details structure or NULL 5871 * 5872 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology 5873 */ 5874 int 5875 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value, 5876 struct ice_sq_cd *cd) 5877 { 5878 struct ice_aqc_gpio *cmd; 5879 struct ice_aq_desc desc; 5880 5881 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio); 5882 cmd = &desc.params.read_write_gpio; 5883 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5884 cmd->gpio_num = pin_idx; 5885 cmd->gpio_val = value ? 1 : 0; 5886 5887 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5888 } 5889 5890 /** 5891 * ice_aq_get_gpio 5892 * @hw: pointer to the hw struct 5893 * @gpio_ctrl_handle: GPIO controller node handle 5894 * @pin_idx: IO Number of the GPIO that needs to be set 5895 * @value: IO value read 5896 * @cd: pointer to command details structure or NULL 5897 * 5898 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of 5899 * the topology 5900 */ 5901 int 5902 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, 5903 bool *value, struct ice_sq_cd *cd) 5904 { 5905 struct ice_aqc_gpio *cmd; 5906 struct ice_aq_desc desc; 5907 int status; 5908 5909 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio); 5910 cmd = &desc.params.read_write_gpio; 5911 cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle); 5912 cmd->gpio_num = pin_idx; 5913 5914 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 5915 if (status) 5916 return status; 5917 5918 *value = !!cmd->gpio_val; 5919 return 0; 5920 } 5921 5922 /** 5923 * ice_is_fw_api_min_ver 5924 * @hw: pointer to the hardware structure 5925 * @maj: major version 5926 * @min: minor version 5927 * @patch: patch version 5928 * 5929 * Checks if the firmware API is minimum version 5930 */ 5931 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch) 5932 { 5933 if (hw->api_maj_ver == maj) { 5934 if (hw->api_min_ver > min) 5935 return true; 5936 if (hw->api_min_ver == min && hw->api_patch >= patch) 5937 return true; 5938 } else if (hw->api_maj_ver > maj) { 5939 return true; 5940 } 5941 5942 return false; 5943 } 5944 5945 /** 5946 * ice_fw_supports_link_override 5947 * @hw: pointer to the hardware structure 5948 * 5949 * Checks if the firmware supports link override 5950 */ 5951 bool ice_fw_supports_link_override(struct ice_hw *hw) 5952 { 5953 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ, 5954 ICE_FW_API_LINK_OVERRIDE_MIN, 5955 ICE_FW_API_LINK_OVERRIDE_PATCH); 5956 } 5957 5958 /** 5959 * ice_get_link_default_override 5960 * @ldo: pointer to the link default override struct 5961 * @pi: pointer to the port info struct 5962 * 5963 * Gets the link default override for a port 5964 */ 5965 int 5966 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 5967 struct ice_port_info *pi) 5968 { 5969 u16 i, tlv, tlv_len, tlv_start, buf, offset; 5970 struct ice_hw *hw = pi->hw; 5971 int status; 5972 5973 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 5974 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 5975 if (status) { 5976 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); 5977 return status; 5978 } 5979 5980 /* Each port has its own config; calculate for our port */ 5981 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 5982 ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 5983 5984 /* link options first */ 5985 status = ice_read_sr_word(hw, tlv_start, &buf); 5986 if (status) { 5987 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 5988 return status; 5989 } 5990 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; 5991 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 5992 ICE_LINK_OVERRIDE_PHY_CFG_S; 5993 5994 /* link PHY config */ 5995 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 5996 status = ice_read_sr_word(hw, offset, &buf); 5997 if (status) { 5998 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); 5999 return status; 6000 } 6001 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 6002 6003 /* PHY types low */ 6004 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; 6005 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 6006 status = ice_read_sr_word(hw, (offset + i), &buf); 6007 if (status) { 6008 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 6009 return status; 6010 } 6011 /* shift 16 bits at a time to fill 64 bits */ 6012 ldo->phy_type_low |= ((u64)buf << (i * 16)); 6013 } 6014 6015 /* PHY types high */ 6016 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + 6017 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; 6018 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 6019 status = ice_read_sr_word(hw, (offset + i), &buf); 6020 if (status) { 6021 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 6022 return status; 6023 } 6024 /* shift 16 bits at a time to fill 64 bits */ 6025 ldo->phy_type_high |= ((u64)buf << (i * 16)); 6026 } 6027 6028 return status; 6029 } 6030 6031 /** 6032 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled 6033 * @caps: get PHY capability data 6034 */ 6035 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) 6036 { 6037 if (caps->caps & ICE_AQC_PHY_AN_MODE || 6038 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | 6039 ICE_AQC_PHY_AN_EN_CLAUSE73 | 6040 ICE_AQC_PHY_AN_EN_CLAUSE37)) 6041 return true; 6042 6043 return false; 6044 } 6045 6046 /** 6047 * ice_aq_set_lldp_mib - Set the LLDP MIB 6048 * @hw: pointer to the HW struct 6049 * @mib_type: Local, Remote or both Local and Remote MIBs 6050 * @buf: pointer to the caller-supplied buffer to store the MIB block 6051 * @buf_size: size of the buffer (in bytes) 6052 * @cd: pointer to command details structure or NULL 6053 * 6054 * Set the LLDP MIB. (0x0A08) 6055 */ 6056 int 6057 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 6058 struct ice_sq_cd *cd) 6059 { 6060 struct ice_aqc_lldp_set_local_mib *cmd; 6061 struct ice_aq_desc desc; 6062 6063 cmd = &desc.params.lldp_set_mib; 6064 6065 if (buf_size == 0 || !buf) 6066 return -EINVAL; 6067 6068 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); 6069 6070 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); 6071 desc.datalen = cpu_to_le16(buf_size); 6072 6073 cmd->type = mib_type; 6074 cmd->length = cpu_to_le16(buf_size); 6075 6076 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 6077 } 6078 6079 /** 6080 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl 6081 * @hw: pointer to HW struct 6082 */ 6083 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) 6084 { 6085 if (hw->mac_type != ICE_MAC_E810) 6086 return false; 6087 6088 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ, 6089 ICE_FW_API_LLDP_FLTR_MIN, 6090 ICE_FW_API_LLDP_FLTR_PATCH); 6091 } 6092 6093 /** 6094 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter 6095 * @hw: pointer to HW struct 6096 * @vsi_num: absolute HW index for VSI 6097 * @add: boolean for if adding or removing a filter 6098 */ 6099 int 6100 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) 6101 { 6102 struct ice_aqc_lldp_filter_ctrl *cmd; 6103 struct ice_aq_desc desc; 6104 6105 cmd = &desc.params.lldp_filter_ctrl; 6106 6107 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); 6108 6109 if (add) 6110 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; 6111 else 6112 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; 6113 6114 cmd->vsi_num = cpu_to_le16(vsi_num); 6115 6116 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 6117 } 6118 6119 /** 6120 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request 6121 * @hw: pointer to HW struct 6122 */ 6123 int ice_lldp_execute_pending_mib(struct ice_hw *hw) 6124 { 6125 struct ice_aq_desc desc; 6126 6127 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib); 6128 6129 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 6130 } 6131 6132 /** 6133 * ice_fw_supports_report_dflt_cfg 6134 * @hw: pointer to the hardware structure 6135 * 6136 * Checks if the firmware supports report default configuration 6137 */ 6138 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) 6139 { 6140 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ, 6141 ICE_FW_API_REPORT_DFLT_CFG_MIN, 6142 ICE_FW_API_REPORT_DFLT_CFG_PATCH); 6143 } 6144 6145 /* each of the indexes into the following array match the speed of a return 6146 * value from the list of AQ returned speeds like the range: 6147 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB excluding 6148 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15) and maps to BIT(14) in this 6149 * array. The array is defined as 15 elements long because the link_speed 6150 * returned by the firmware is a 16 bit * value, but is indexed 6151 * by [fls(speed) - 1] 6152 */ 6153 static const u32 ice_aq_to_link_speed[] = { 6154 SPEED_10, /* BIT(0) */ 6155 SPEED_100, 6156 SPEED_1000, 6157 SPEED_2500, 6158 SPEED_5000, 6159 SPEED_10000, 6160 SPEED_20000, 6161 SPEED_25000, 6162 SPEED_40000, 6163 SPEED_50000, 6164 SPEED_100000, /* BIT(10) */ 6165 SPEED_200000, 6166 }; 6167 6168 /** 6169 * ice_get_link_speed - get integer speed from table 6170 * @index: array index from fls(aq speed) - 1 6171 * 6172 * Returns: u32 value containing integer speed 6173 */ 6174 u32 ice_get_link_speed(u16 index) 6175 { 6176 if (index >= ARRAY_SIZE(ice_aq_to_link_speed)) 6177 return 0; 6178 6179 return ice_aq_to_link_speed[index]; 6180 } 6181