/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2023, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	500

static const char * const ice_link_mode_str_low[] = {
	ice_arr_elem_idx(0, "100BASE_TX"),
	ice_arr_elem_idx(1, "100M_SGMII"),
	ice_arr_elem_idx(2, "1000BASE_T"),
	ice_arr_elem_idx(3, "1000BASE_SX"),
	ice_arr_elem_idx(4, "1000BASE_LX"),
	ice_arr_elem_idx(5, "1000BASE_KX"),
	ice_arr_elem_idx(6, "1G_SGMII"),
	ice_arr_elem_idx(7, "2500BASE_T"),
	ice_arr_elem_idx(8, "2500BASE_X"),
	ice_arr_elem_idx(9, "2500BASE_KX"),
	ice_arr_elem_idx(10, "5GBASE_T"),
	ice_arr_elem_idx(11, "5GBASE_KR"),
	ice_arr_elem_idx(12, "10GBASE_T"),
	ice_arr_elem_idx(13, "10G_SFI_DA"),
	ice_arr_elem_idx(14, "10GBASE_SR"),
	ice_arr_elem_idx(15, "10GBASE_LR"),
	ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
	ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
	ice_arr_elem_idx(18, "10G_SFI_C2C"),
	ice_arr_elem_idx(19, "25GBASE_T"),
	ice_arr_elem_idx(20, "25GBASE_CR"),
	ice_arr_elem_idx(21, "25GBASE_CR_S"),
	ice_arr_elem_idx(22, "25GBASE_CR1"),
	ice_arr_elem_idx(23, "25GBASE_SR"),
	ice_arr_elem_idx(24, "25GBASE_LR"),
	ice_arr_elem_idx(25, "25GBASE_KR"),
	ice_arr_elem_idx(26, "25GBASE_KR_S"),
	ice_arr_elem_idx(27, "25GBASE_KR1"),
	ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
	ice_arr_elem_idx(29, "25G_AUI_C2C"),
	ice_arr_elem_idx(30, "40GBASE_CR4"),
	ice_arr_elem_idx(31, "40GBASE_SR4"),
	ice_arr_elem_idx(32, "40GBASE_LR4"),
	ice_arr_elem_idx(33, "40GBASE_KR4"),
	ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
	ice_arr_elem_idx(35, "40G_XLAUI"),
	ice_arr_elem_idx(36, "50GBASE_CR2"),
	ice_arr_elem_idx(37, "50GBASE_SR2"),
	ice_arr_elem_idx(38, "50GBASE_LR2"),
	ice_arr_elem_idx(39, "50GBASE_KR2"),
	ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
	ice_arr_elem_idx(41, "50G_LAUI2"),
	ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
	ice_arr_elem_idx(43, "50G_AUI2"),
	ice_arr_elem_idx(44, "50GBASE_CP"),
	ice_arr_elem_idx(45, "50GBASE_SR"),
	ice_arr_elem_idx(46, "50GBASE_FR"),
	ice_arr_elem_idx(47, "50GBASE_LR"),
	ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
	ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
	ice_arr_elem_idx(50, "50G_AUI1"),
	ice_arr_elem_idx(51, "100GBASE_CR4"),
	ice_arr_elem_idx(52, "100GBASE_SR4"),
	ice_arr_elem_idx(53, "100GBASE_LR4"),
	ice_arr_elem_idx(54, "100GBASE_KR4"),
	ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
	ice_arr_elem_idx(56, "100G_CAUI4"),
	ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
	ice_arr_elem_idx(58, "100G_AUI4"),
	ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
	ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
	ice_arr_elem_idx(61, "100GBASE_CP2"),
	ice_arr_elem_idx(62, "100GBASE_SR2"),
	ice_arr_elem_idx(63, "100GBASE_DR"),
};

static const char * const ice_link_mode_str_high[] = {
	ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
	ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
	ice_arr_elem_idx(2, "100G_CAUI2"),
	ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
	ice_arr_elem_idx(4, "100G_AUI2"),
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
		  (unsigned long long)low);

	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
		  (unsigned long long)high);

	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
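/* Illustrative sketch (not part of the driver): how a phy_type_low mask
 * decodes against the string table above. A value with bits 12 and 16 set
 * would log "10GBASE_T" and "10GBASE_KR_CR1":
 *
 *	u64 low = BIT_ULL(12) | BIT_ULL(16);
 *
 *	ice_dump_phy_type(hw, low, 0, "example");
 *	// emits: example: bit(12): 10GBASE_T
 *	//        example: bit(16): 10GBASE_KR_CR1
 */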
/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T5:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T5:
		case ICE_SUBDEV_ID_E810T6:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}
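/* Usage sketch (illustrative only; the helpers in the branches are
 * hypothetical): device-family checks are meant to run after
 * ice_set_mac_type() has classified the adapter:
 *
 *	enum ice_status status = ice_set_mac_type(hw);
 *
 *	if (status)
 *		return status;
 *	if (ice_is_e810(hw))
 *		setup_e810_specifics(hw);	// hypothetical
 *	else if (ice_is_e823(hw))
 *		setup_e823_specifics(hw);	// hypothetical
 */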
/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_NONDMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}
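/* Usage sketch (illustrative only): a caller sizes the buffer for the two
 * (LAN and WoL) addresses a port can report, mirroring what ice_init_hw()
 * below does with ice_calloc():
 *
 *	struct ice_aqc_manage_mac_read_resp buf[2];
 *	enum ice_status status;
 *
 *	status = ice_aq_manage_mac_read(hw, buf, sizeof(buf), NULL);
 *	if (!status)
 *		// hw->port_info->mac.lan_addr now holds the LAN address
 */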
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	const char *prefix;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);

	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
			  LE64_TO_CPU(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
	}

	return status;
}
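/* Usage sketch (illustrative only): querying the currently negotiated PHY
 * configuration; any report mode handled in the switch above is valid:
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	enum ice_status status;
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     &pcaps, NULL);
 *	if (!status)
 *		// pcaps.phy_type_low/high describe the active link modes
 */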
/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 */
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return ICE_ERR_NOT_SUPPORTED;

	if (node_handle)
		*node_handle =
			LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return ICE_SUCCESS;
}

#define MAX_NETLIST_SIZE 10
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Find and return the node handle for a given node type and part number in
 * the netlist. When found, ICE_SUCCESS is returned; otherwise
 * ICE_ERR_DOES_NOT_EXIST. If node_handle is provided, it is set to the found
 * node's handle.
 */
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
		      u16 *node_handle)
{
	struct ice_aqc_get_link_topo cmd;
	u8 rec_node_part_number;
	u16 rec_node_handle;
	u8 idx;

	for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
		enum ice_status status;

		memset(&cmd, 0, sizeof(cmd));

		cmd.addr.topo_params.node_type_ctx =
			(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 &rec_node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number) {
			if (node_handle)
				*node_handle = rec_node_handle;
			return ICE_SUCCESS;
		}
	}

	return ICE_ERR_DOES_NOT_EXIST;
}
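/* Usage sketch (illustrative only; the part number is hypothetical): look
 * up the handle of the first cage node recorded in the netlist:
 *
 *	u16 handle;
 *	enum ice_status status;
 *
 *	status = ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
 *				       example_part_number, &handle);
 *	if (status == ICE_ERR_DOES_NOT_EXIST)
 *		// not among the first MAX_NETLIST_SIZE netlist entries
 */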
/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.topo_params.node_type_ctx =
		(ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
		 ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.topo_params.node_type_ctx |=
		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
		 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE);

	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present
	 * then connection type is backplane or BASE-T.
	 */
	return ice_aq_get_netlist_node(pi->hw, cmd, NULL, NULL) == ICE_SUCCESS;
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_AUI;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}
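/* Usage sketch (illustrative only): within this file, callers branch on the
 * derived media type, e.g.:
 *
 *	switch (ice_get_media_type(pi)) {
 *	case ICE_MEDIA_DA:
 *	case ICE_MEDIA_FIBER:
 *		// cabled/optical handling (hypothetical)
 *		break;
 *	case ICE_MEDIA_BACKPLANE:
 *		// KR/KX handling (hypothetical)
 *		break;
 *	default:
 *		break;
 *	}
 */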
#define ice_get_link_status_datalen(hw)	ICE_GET_LINK_STATUS_DATALEN_V1

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
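/* Usage sketch (illustrative only): poll link state and enable Link Status
 * Event reporting in one call; pi->phy.link_info is refreshed as a side
 * effect even when the optional output pointer is NULL:
 *
 *	struct ice_link_status link;
 *	enum ice_status status;
 *
 *	status = ice_aq_get_link_info(pi, true, &link, NULL);
 *	if (!status && (link.link_info & ICE_AQ_LINK_UP))
 *		// link is up at link.link_speed
 */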
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @auto_drop: Tell HW to drop packets if TC queue is blocked
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
		cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
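/* Usage sketch (illustrative only): ice_init_hw() below uses this exact call
 * to enable jumbo frame support at the MAC level without auto-drop:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
 *				    NULL);
 */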
/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_set_umac_shared
 * @hw: pointer to the hw struct
 *
 * Set boolean flag to allow unicast MAC sharing
 */
void ice_set_umac_shared(struct ice_hw *hw)
{
	hw->umac_shared = true;
}
/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;
	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_fwlog_set_support_ena(hw);
	status = ice_fwlog_set(hw, &hw->fwlog_cfg);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
			  status);
	} else {
		if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
			status = ice_fwlog_register(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
					  status);
		} else {
			status = ice_fwlog_unregister(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
					  status);
		}
	}

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
		ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */

	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
				    NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
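/* Lifecycle sketch (illustrative only): ice_init_hw() unrolls its own
 * partial state on failure, so a probe path need not call ice_deinit_hw()
 * in that case:
 *
 *	enum ice_status status = ice_init_hw(hw);
 *
 *	if (status)
 *		return status;		// ice_init_hw already unrolled itself
 *	// ... nominal operation: VSIs, queues, filters ...
 *	ice_deinit_hw(hw);		// teardown during nominal unload only
 */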
/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config
	 * lock timeout plus the PFR timeout, which accounts for a possible
	 * reset occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/**
 * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from HW register space to dense structure
 */
static enum ice_status
ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately from HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));

		*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
	}

	return ICE_SUCCESS;
}
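/* Note on the ICE_CTX_STORE() tables below (descriptive, not normative):
 * each entry records a field's bit width and LSB position within the dense
 * HW image, and ice_set_ctx()/ice_get_ctx() pack and unpack accordingly.
 * For example, qlen (width 13, LSB 89) occupies bits 89..101 and therefore
 * straddles the dword boundary at bit 96; callers never deal with that
 * split, they only read and write the sparse struct fields.
 */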
/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/**
 * ice_read_rxq_ctx - Read rxq context from HW
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Read rxq context from HW register space and then converts it from dense
 * structure to sparse
 */
enum ice_status
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		 u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
	enum ice_status status;

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
	if (status)
		return status;

	return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
}

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}
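/* Usage sketch (illustrative only): round-trip a minimal Rx queue context.
 * The 7-bit shift assumes the base field is in 128-byte units, and
 * ring_dma/rxq_index are hypothetical caller-provided values:
 *
 *	struct ice_rlan_ctx ctx = { 0 };
 *
 *	ctx.base = ring_dma >> 7;
 *	ctx.qlen = 512;
 *	if (!ice_write_rxq_ctx(hw, &ctx, rxq_index))
 *		status = ice_read_rxq_ctx(hw, &ctx, rxq_index);
 *	ice_clear_rxq_ctx(hw, rxq_index);
 */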
/* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
 * Bit[0-175] is valid
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register
 * space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}
/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}
1735 */ 1736 static bool ice_should_retry_sq_send_cmd(u16 opcode) 1737 { 1738 switch (opcode) { 1739 case ice_aqc_opc_dnl_get_status: 1740 case ice_aqc_opc_dnl_run: 1741 case ice_aqc_opc_dnl_call: 1742 case ice_aqc_opc_dnl_read_sto: 1743 case ice_aqc_opc_dnl_write_sto: 1744 case ice_aqc_opc_dnl_set_breakpoints: 1745 case ice_aqc_opc_dnl_read_log: 1746 case ice_aqc_opc_get_link_topo: 1747 case ice_aqc_opc_done_alt_write: 1748 case ice_aqc_opc_lldp_stop: 1749 case ice_aqc_opc_lldp_start: 1750 case ice_aqc_opc_lldp_filter_ctrl: 1751 return true; 1752 } 1753 1754 return false; 1755 } 1756 1757 /** 1758 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ) 1759 * @hw: pointer to the HW struct 1760 * @cq: pointer to the specific Control queue 1761 * @desc: prefilled descriptor describing the command 1762 * @buf: buffer to use for indirect commands (or NULL for direct commands) 1763 * @buf_size: size of buffer for indirect commands (or 0 for direct commands) 1764 * @cd: pointer to command details structure 1765 * 1766 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin 1767 * Queue if the EBUSY AQ error is returned. 1768 */ 1769 static enum ice_status 1770 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, 1771 struct ice_aq_desc *desc, void *buf, u16 buf_size, 1772 struct ice_sq_cd *cd) 1773 { 1774 struct ice_aq_desc desc_cpy; 1775 enum ice_status status; 1776 bool is_cmd_for_retry; 1777 u8 *buf_cpy = NULL; 1778 u8 idx = 0; 1779 u16 opcode; 1780 1781 opcode = LE16_TO_CPU(desc->opcode); 1782 is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode); 1783 ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM); 1784 1785 if (is_cmd_for_retry) { 1786 if (buf) { 1787 buf_cpy = (u8 *)ice_malloc(hw, buf_size); 1788 if (!buf_cpy) 1789 return ICE_ERR_NO_MEMORY; 1790 } 1791 1792 ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy), 1793 ICE_NONDMA_TO_NONDMA); 1794 } 1795 1796 do { 1797 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd); 1798 1799 if (!is_cmd_for_retry || status == ICE_SUCCESS || 1800 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) 1801 break; 1802 1803 if (buf_cpy) 1804 ice_memcpy(buf, buf_cpy, buf_size, 1805 ICE_NONDMA_TO_NONDMA); 1806 1807 ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy), 1808 ICE_NONDMA_TO_NONDMA); 1809 1810 ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false); 1811 1812 } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); 1813 1814 if (buf_cpy) 1815 ice_free(hw, buf_cpy); 1816 1817 return status; 1818 } 1819 1820 /** 1821 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue 1822 * @hw: pointer to the HW struct 1823 * @desc: descriptor describing the command 1824 * @buf: buffer to use for indirect commands (NULL for direct commands) 1825 * @buf_size: size of buffer for indirect commands (0 for direct commands) 1826 * @cd: pointer to command details structure 1827 * 1828 * Helper function to send FW Admin Queue commands to the FW Admin Queue. 
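 *
 * An illustrative sketch of a direct (buffer-less) command, mirroring
 * ice_aq_get_fw_ver() below; the pattern, not the exact fields, is the
 * point:
 *
 *	struct ice_aq_desc desc;
 *	enum ice_status status;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);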
1829 */ 1830 enum ice_status 1831 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, 1832 u16 buf_size, struct ice_sq_cd *cd) 1833 { 1834 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); 1835 } 1836 1837 /** 1838 * ice_aq_get_fw_ver 1839 * @hw: pointer to the HW struct 1840 * @cd: pointer to command details structure or NULL 1841 * 1842 * Get the firmware version (0x0001) from the admin queue commands 1843 */ 1844 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) 1845 { 1846 struct ice_aqc_get_ver *resp; 1847 struct ice_aq_desc desc; 1848 enum ice_status status; 1849 1850 resp = &desc.params.get_ver; 1851 1852 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); 1853 1854 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1855 1856 if (!status) { 1857 hw->fw_branch = resp->fw_branch; 1858 hw->fw_maj_ver = resp->fw_major; 1859 hw->fw_min_ver = resp->fw_minor; 1860 hw->fw_patch = resp->fw_patch; 1861 hw->fw_build = LE32_TO_CPU(resp->fw_build); 1862 hw->api_branch = resp->api_branch; 1863 hw->api_maj_ver = resp->api_major; 1864 hw->api_min_ver = resp->api_minor; 1865 hw->api_patch = resp->api_patch; 1866 } 1867 1868 return status; 1869 } 1870 1871 /** 1872 * ice_aq_send_driver_ver 1873 * @hw: pointer to the HW struct 1874 * @dv: driver's major, minor version 1875 * @cd: pointer to command details structure or NULL 1876 * 1877 * Send the driver version (0x0002) to the firmware 1878 */ 1879 enum ice_status 1880 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, 1881 struct ice_sq_cd *cd) 1882 { 1883 struct ice_aqc_driver_ver *cmd; 1884 struct ice_aq_desc desc; 1885 u16 len; 1886 1887 cmd = &desc.params.driver_ver; 1888 1889 if (!dv) 1890 return ICE_ERR_PARAM; 1891 1892 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); 1893 1894 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 1895 cmd->major_ver = dv->major_ver; 1896 cmd->minor_ver = dv->minor_ver; 1897 cmd->build_ver = dv->build_ver; 1898 cmd->subbuild_ver = dv->subbuild_ver; 1899 1900 len = 0; 1901 while (len < sizeof(dv->driver_string) && 1902 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len]) 1903 len++; 1904 1905 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); 1906 } 1907 1908 /** 1909 * ice_aq_q_shutdown 1910 * @hw: pointer to the HW struct 1911 * @unloading: is the driver unloading itself 1912 * 1913 * Tell the Firmware that we're shutting down the AdminQ and whether 1914 * or not the driver is unloading as well (0x0003). 1915 */ 1916 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1917 { 1918 struct ice_aqc_q_shutdown *cmd; 1919 struct ice_aq_desc desc; 1920 1921 cmd = &desc.params.q_shutdown; 1922 1923 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1924 1925 if (unloading) 1926 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1927 1928 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1929 } 1930 1931 /** 1932 * ice_aq_req_res 1933 * @hw: pointer to the HW struct 1934 * @res: resource ID 1935 * @access: access type 1936 * @sdp_number: resource number 1937 * @timeout: the maximum time in ms that the driver may hold the resource 1938 * @cd: pointer to command details structure or NULL 1939 * 1940 * Requests common resource using the admin queue commands (0x0008). 
1941 * When attempting to acquire the Global Config Lock, the driver can 1942 * learn of three states: 1943 * 1) ICE_SUCCESS - acquired lock, and can perform download package 1944 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load 1945 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has 1946 * successfully downloaded the package; the driver does 1947 * not have to download the package and can continue 1948 * loading 1949 * 1950 * Note that if the caller is in an acquire lock, perform action, release lock 1951 * phase of operation, it is possible that the FW may detect a timeout and issue 1952 * a CORER. In this case, the driver will receive a CORER interrupt and will 1953 * have to determine its cause. The calling thread that is handling this flow 1954 * will likely get an error propagated back to it indicating the Download 1955 * Package, Update Package or the Release Resource AQ commands timed out. 1956 */ 1957 static enum ice_status 1958 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1959 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1960 struct ice_sq_cd *cd) 1961 { 1962 struct ice_aqc_req_res *cmd_resp; 1963 struct ice_aq_desc desc; 1964 enum ice_status status; 1965 1966 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1967 1968 cmd_resp = &desc.params.res_owner; 1969 1970 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1971 1972 cmd_resp->res_id = CPU_TO_LE16(res); 1973 cmd_resp->access_type = CPU_TO_LE16(access); 1974 cmd_resp->res_number = CPU_TO_LE32(sdp_number); 1975 cmd_resp->timeout = CPU_TO_LE32(*timeout); 1976 *timeout = 0; 1977 1978 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1979 1980 /* The completion specifies the maximum time in ms that the driver 1981 * may hold the resource in the Timeout field. 1982 */ 1983 1984 /* Global config lock response utilizes an additional status field. 1985 * 1986 * If the Global config lock resource is held by some other driver, the 1987 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1988 * and the timeout field indicates the maximum time the current owner 1989 * of the resource has to free it. 1990 */ 1991 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1992 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1993 *timeout = LE32_TO_CPU(cmd_resp->timeout); 1994 return ICE_SUCCESS; 1995 } else if (LE16_TO_CPU(cmd_resp->status) == 1996 ICE_AQ_RES_GLBL_IN_PROG) { 1997 *timeout = LE32_TO_CPU(cmd_resp->timeout); 1998 return ICE_ERR_AQ_ERROR; 1999 } else if (LE16_TO_CPU(cmd_resp->status) == 2000 ICE_AQ_RES_GLBL_DONE) { 2001 return ICE_ERR_AQ_NO_WORK; 2002 } 2003 2004 /* invalid FW response, force a timeout immediately */ 2005 *timeout = 0; 2006 return ICE_ERR_AQ_ERROR; 2007 } 2008 2009 /* If the resource is held by some other driver, the command completes 2010 * with a busy return value and the timeout field indicates the maximum 2011 * time the current owner of the resource has to free it. 
 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * Release a common resource using the admin queue commands (0x0009).
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
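 *
 * Paired with ice_acquire_res(), this gives the usual ownership pattern;
 * a hypothetical caller (resource ID and timeout are placeholders) might
 * look like:
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, timeout_ms)) {
 *		... access the shared resource ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}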
2114 */ 2115 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2116 { 2117 enum ice_status status; 2118 u32 total_delay = 0; 2119 2120 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 2121 2122 status = ice_aq_release_res(hw, res, 0, NULL); 2123 2124 /* there are some rare cases when trying to release the resource 2125 * results in an admin queue timeout, so handle them correctly 2126 */ 2127 while ((status == ICE_ERR_AQ_TIMEOUT) && 2128 (total_delay < hw->adminq.sq_cmd_timeout)) { 2129 ice_msec_delay(1, true); 2130 status = ice_aq_release_res(hw, res, 0, NULL); 2131 total_delay++; 2132 } 2133 } 2134 2135 /** 2136 * ice_aq_alloc_free_res - command to allocate/free resources 2137 * @hw: pointer to the HW struct 2138 * @num_entries: number of resource entries in buffer 2139 * @buf: Indirect buffer to hold data parameters and response 2140 * @buf_size: size of buffer for indirect commands 2141 * @opc: pass in the command opcode 2142 * @cd: pointer to command details structure or NULL 2143 * 2144 * Helper function to allocate/free resources using the admin queue commands 2145 */ 2146 enum ice_status 2147 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 2148 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2149 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2150 { 2151 struct ice_aqc_alloc_free_res_cmd *cmd; 2152 struct ice_aq_desc desc; 2153 2154 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 2155 2156 cmd = &desc.params.sw_res_ctrl; 2157 2158 if (!buf) 2159 return ICE_ERR_PARAM; 2160 2161 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries)) 2162 return ICE_ERR_PARAM; 2163 2164 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2165 2166 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 2167 2168 cmd->num_entries = CPU_TO_LE16(num_entries); 2169 2170 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2171 } 2172 2173 /** 2174 * ice_alloc_hw_res - allocate resource 2175 * @hw: pointer to the HW struct 2176 * @type: type of resource 2177 * @num: number of resources to allocate 2178 * @btm: allocate from bottom 2179 * @res: pointer to array that will receive the resources 2180 */ 2181 enum ice_status 2182 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2183 { 2184 struct ice_aqc_alloc_free_res_elem *buf; 2185 enum ice_status status; 2186 u16 buf_len; 2187 2188 buf_len = ice_struct_size(buf, elem, num); 2189 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); 2190 if (!buf) 2191 return ICE_ERR_NO_MEMORY; 2192 2193 /* Prepare buffer to allocate resource. 
*/ 2194 buf->num_elems = CPU_TO_LE16(num); 2195 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2196 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2197 if (btm) 2198 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2199 2200 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2201 ice_aqc_opc_alloc_res, NULL); 2202 if (status) 2203 goto ice_alloc_res_exit; 2204 2205 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num, 2206 ICE_NONDMA_TO_NONDMA); 2207 2208 ice_alloc_res_exit: 2209 ice_free(hw, buf); 2210 return status; 2211 } 2212 2213 /** 2214 * ice_free_hw_res - free allocated HW resource 2215 * @hw: pointer to the HW struct 2216 * @type: type of resource to free 2217 * @num: number of resources 2218 * @res: pointer to array that contains the resources to free 2219 */ 2220 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2221 { 2222 struct ice_aqc_alloc_free_res_elem *buf; 2223 enum ice_status status; 2224 u16 buf_len; 2225 2226 buf_len = ice_struct_size(buf, elem, num); 2227 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); 2228 if (!buf) 2229 return ICE_ERR_NO_MEMORY; 2230 2231 /* Prepare buffer to free resource. */ 2232 buf->num_elems = CPU_TO_LE16(num); 2233 buf->res_type = CPU_TO_LE16(type); 2234 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num, 2235 ICE_NONDMA_TO_NONDMA); 2236 2237 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 2238 ice_aqc_opc_free_res, NULL); 2239 if (status) 2240 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2241 2242 ice_free(hw, buf); 2243 return status; 2244 } 2245 2246 /** 2247 * ice_get_num_per_func - determine number of resources per PF 2248 * @hw: pointer to the HW structure 2249 * @max: value to be evenly split between each PF 2250 * 2251 * Determine the number of valid functions by going through the bitmap returned 2252 * from parsing capabilities and use this to calculate the number of resources 2253 * per PF based on the max value passed in. 
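 *
 * For example, with a valid-functions bitmap of 0x0F (four PFs) and a max
 * of 768, each PF would be granted 768 / 4 = 192 resources (illustrative
 * values only).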
2254 */ 2255 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2256 { 2257 u8 funcs; 2258 2259 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2260 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions & 2261 ICE_CAPS_VALID_FUNCS_M); 2262 2263 if (!funcs) 2264 return 0; 2265 2266 return max / funcs; 2267 } 2268 2269 /** 2270 * ice_print_led_caps - print LED capabilities 2271 * @hw: pointer to the ice_hw instance 2272 * @caps: pointer to common caps instance 2273 * @prefix: string to prefix when printing 2274 * @dbg: set to indicate debug print 2275 */ 2276 static void 2277 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2278 char const *prefix, bool dbg) 2279 { 2280 u8 i; 2281 2282 if (dbg) 2283 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix, 2284 caps->led_pin_num); 2285 else 2286 ice_info(hw, "%s: led_pin_num = %d\n", prefix, 2287 caps->led_pin_num); 2288 2289 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) { 2290 if (!caps->led[i]) 2291 continue; 2292 2293 if (dbg) 2294 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n", 2295 prefix, i, caps->led[i]); 2296 else 2297 ice_info(hw, "%s: led[%d] = %d\n", prefix, i, 2298 caps->led[i]); 2299 } 2300 } 2301 2302 /** 2303 * ice_print_sdp_caps - print SDP capabilities 2304 * @hw: pointer to the ice_hw instance 2305 * @caps: pointer to common caps instance 2306 * @prefix: string to prefix when printing 2307 * @dbg: set to indicate debug print 2308 */ 2309 static void 2310 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2311 char const *prefix, bool dbg) 2312 { 2313 u8 i; 2314 2315 if (dbg) 2316 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix, 2317 caps->sdp_pin_num); 2318 else 2319 ice_info(hw, "%s: sdp_pin_num = %d\n", prefix, 2320 caps->sdp_pin_num); 2321 2322 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) { 2323 if (!caps->sdp[i]) 2324 continue; 2325 2326 if (dbg) 2327 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n", 2328 prefix, i, caps->sdp[i]); 2329 else 2330 ice_info(hw, "%s: sdp[%d] = %d\n", prefix, 2331 i, caps->sdp[i]); 2332 } 2333 } 2334 2335 /** 2336 * ice_parse_common_caps - parse common device/function capabilities 2337 * @hw: pointer to the HW struct 2338 * @caps: pointer to common capabilities structure 2339 * @elem: the capability element to parse 2340 * @prefix: message prefix for tracing capabilities 2341 * 2342 * Given a capability element, extract relevant details into the common 2343 * capability structure. 2344 * 2345 * Returns: true if the capability matches one of the common capability ids, 2346 * false otherwise. 
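 *
 * As an illustration, an element with cap == ICE_AQC_CAPS_RSS,
 * number == 512 and logical_id == 9 (hypothetical values) would set
 * rss_table_size = 512 and rss_table_entry_width = 9.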
2347 */ 2348 static bool 2349 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2350 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2351 { 2352 u32 logical_id = LE32_TO_CPU(elem->logical_id); 2353 u32 phys_id = LE32_TO_CPU(elem->phys_id); 2354 u32 number = LE32_TO_CPU(elem->number); 2355 u16 cap = LE16_TO_CPU(elem->cap); 2356 bool found = true; 2357 2358 switch (cap) { 2359 case ICE_AQC_CAPS_SWITCHING_MODE: 2360 caps->switching_mode = number; 2361 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix, 2362 caps->switching_mode); 2363 break; 2364 case ICE_AQC_CAPS_MANAGEABILITY_MODE: 2365 caps->mgmt_mode = number; 2366 caps->mgmt_protocols_mctp = logical_id; 2367 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix, 2368 caps->mgmt_mode); 2369 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix, 2370 caps->mgmt_protocols_mctp); 2371 break; 2372 case ICE_AQC_CAPS_OS2BMC: 2373 caps->os2bmc = number; 2374 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc); 2375 break; 2376 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2377 caps->valid_functions = number; 2378 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2379 caps->valid_functions); 2380 break; 2381 case ICE_AQC_CAPS_SRIOV: 2382 caps->sr_iov_1_1 = (number == 1); 2383 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2384 caps->sr_iov_1_1); 2385 break; 2386 case ICE_AQC_CAPS_802_1QBG: 2387 caps->evb_802_1_qbg = (number == 1); 2388 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number); 2389 break; 2390 case ICE_AQC_CAPS_802_1BR: 2391 caps->evb_802_1_qbh = (number == 1); 2392 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number); 2393 break; 2394 case ICE_AQC_CAPS_DCB: 2395 caps->dcb = (number == 1); 2396 caps->active_tc_bitmap = logical_id; 2397 caps->maxtc = phys_id; 2398 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2399 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2400 caps->active_tc_bitmap); 2401 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2402 break; 2403 case ICE_AQC_CAPS_ISCSI: 2404 caps->iscsi = (number == 1); 2405 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi); 2406 break; 2407 case ICE_AQC_CAPS_RSS: 2408 caps->rss_table_size = number; 2409 caps->rss_table_entry_width = logical_id; 2410 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2411 caps->rss_table_size); 2412 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2413 caps->rss_table_entry_width); 2414 break; 2415 case ICE_AQC_CAPS_RXQS: 2416 caps->num_rxq = number; 2417 caps->rxq_first_id = phys_id; 2418 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2419 caps->num_rxq); 2420 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2421 caps->rxq_first_id); 2422 break; 2423 case ICE_AQC_CAPS_TXQS: 2424 caps->num_txq = number; 2425 caps->txq_first_id = phys_id; 2426 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2427 caps->num_txq); 2428 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2429 caps->txq_first_id); 2430 break; 2431 case ICE_AQC_CAPS_MSIX: 2432 caps->num_msix_vectors = number; 2433 caps->msix_vector_first_id = phys_id; 2434 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2435 caps->num_msix_vectors); 2436 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2437 caps->msix_vector_first_id); 2438 break; 2439 case 
ICE_AQC_CAPS_NVM_MGMT: 2440 caps->sec_rev_disabled = 2441 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ? 2442 true : false; 2443 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix, 2444 caps->sec_rev_disabled); 2445 caps->update_disabled = 2446 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ? 2447 true : false; 2448 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix, 2449 caps->update_disabled); 2450 caps->nvm_unified_update = 2451 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 2452 true : false; 2453 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2454 caps->nvm_unified_update); 2455 break; 2456 case ICE_AQC_CAPS_CEM: 2457 caps->mgmt_cem = (number == 1); 2458 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix, 2459 caps->mgmt_cem); 2460 break; 2461 case ICE_AQC_CAPS_IWARP: 2462 caps->iwarp = (number == 1); 2463 ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp); 2464 break; 2465 case ICE_AQC_CAPS_ROCEV2_LAG: 2466 caps->roce_lag = (number == 1); 2467 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n", 2468 prefix, caps->roce_lag); 2469 break; 2470 case ICE_AQC_CAPS_LED: 2471 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) { 2472 caps->led[phys_id] = true; 2473 caps->led_pin_num++; 2474 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id); 2475 } 2476 break; 2477 case ICE_AQC_CAPS_SDP: 2478 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) { 2479 caps->sdp[phys_id] = true; 2480 caps->sdp_pin_num++; 2481 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id); 2482 } 2483 break; 2484 case ICE_AQC_CAPS_WR_CSR_PROT: 2485 caps->wr_csr_prot = number; 2486 caps->wr_csr_prot |= (u64)logical_id << 32; 2487 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix, 2488 (unsigned long long)caps->wr_csr_prot); 2489 break; 2490 case ICE_AQC_CAPS_WOL_PROXY: 2491 caps->num_wol_proxy_fltr = number; 2492 caps->wol_proxy_vsi_seid = logical_id; 2493 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M); 2494 caps->acpi_prog_mthd = !!(phys_id & 2495 ICE_ACPI_PROG_MTHD_M); 2496 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M); 2497 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix, 2498 caps->num_wol_proxy_fltr); 2499 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix, 2500 caps->wol_proxy_vsi_seid); 2501 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n", 2502 prefix, caps->apm_wol_support); 2503 break; 2504 case ICE_AQC_CAPS_MAX_MTU: 2505 caps->max_mtu = number; 2506 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2507 prefix, caps->max_mtu); 2508 break; 2509 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2510 caps->pcie_reset_avoidance = (number > 0); 2511 ice_debug(hw, ICE_DBG_INIT, 2512 "%s: pcie_reset_avoidance = %d\n", prefix, 2513 caps->pcie_reset_avoidance); 2514 break; 2515 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2516 caps->reset_restrict_support = (number == 1); 2517 ice_debug(hw, ICE_DBG_INIT, 2518 "%s: reset_restrict_support = %d\n", prefix, 2519 caps->reset_restrict_support); 2520 break; 2521 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0: 2522 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1: 2523 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2: 2524 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3: 2525 { 2526 u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0); 2527 2528 caps->ext_topo_dev_img_ver_high[index] = number; 2529 caps->ext_topo_dev_img_ver_low[index] = logical_id; 2530 caps->ext_topo_dev_img_part_num[index] = 2531 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >> 2532 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S; 
2533 caps->ext_topo_dev_img_load_en[index] = 2534 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0; 2535 caps->ext_topo_dev_img_prog_en[index] = 2536 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0; 2537 ice_debug(hw, ICE_DBG_INIT, 2538 "%s: ext_topo_dev_img_ver_high[%d] = %d\n", 2539 prefix, index, 2540 caps->ext_topo_dev_img_ver_high[index]); 2541 ice_debug(hw, ICE_DBG_INIT, 2542 "%s: ext_topo_dev_img_ver_low[%d] = %d\n", 2543 prefix, index, 2544 caps->ext_topo_dev_img_ver_low[index]); 2545 ice_debug(hw, ICE_DBG_INIT, 2546 "%s: ext_topo_dev_img_part_num[%d] = %d\n", 2547 prefix, index, 2548 caps->ext_topo_dev_img_part_num[index]); 2549 ice_debug(hw, ICE_DBG_INIT, 2550 "%s: ext_topo_dev_img_load_en[%d] = %d\n", 2551 prefix, index, 2552 caps->ext_topo_dev_img_load_en[index]); 2553 ice_debug(hw, ICE_DBG_INIT, 2554 "%s: ext_topo_dev_img_prog_en[%d] = %d\n", 2555 prefix, index, 2556 caps->ext_topo_dev_img_prog_en[index]); 2557 break; 2558 } 2559 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2560 caps->tx_sched_topo_comp_mode_en = (number == 1); 2561 break; 2562 case ICE_AQC_CAPS_DYN_FLATTENING: 2563 caps->dyn_flattening_en = (number == 1); 2564 ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n", 2565 prefix, caps->dyn_flattening_en); 2566 break; 2567 default: 2568 /* Not one of the recognized common capabilities */ 2569 found = false; 2570 } 2571 2572 return found; 2573 } 2574 2575 /** 2576 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2577 * @hw: pointer to the HW structure 2578 * @caps: pointer to capabilities structure to fix 2579 * 2580 * Re-calculate the capabilities that are dependent on the number of physical 2581 * ports; i.e. some features are not supported or function differently on 2582 * devices with more than 4 ports. 2583 */ 2584 static void 2585 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2586 { 2587 /* This assumes device capabilities are always scanned before function 2588 * capabilities during the initialization flow. 2589 */ 2590 if (hw->dev_caps.num_funcs > 4) { 2591 /* Max 4 TCs per port */ 2592 caps->maxtc = 4; 2593 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2594 caps->maxtc); 2595 if (caps->iwarp) { 2596 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2597 caps->iwarp = 0; 2598 } 2599 2600 /* print message only when processing device capabilities 2601 * during initialization. 2602 */ 2603 if (caps == &hw->dev_caps.common_cap) 2604 ice_info(hw, "RDMA functionality is not available with the current device configuration.\n"); 2605 } 2606 } 2607 2608 /** 2609 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2610 * @hw: pointer to the HW struct 2611 * @func_p: pointer to function capabilities structure 2612 * @cap: pointer to the capability element to parse 2613 * 2614 * Extract function capabilities for ICE_AQC_CAPS_VF. 
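 *
 * For example, number == 8 and logical_id == 16 (hypothetical values)
 * would report 8 allocated VFs whose IDs start at base ID 16.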
 */
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);
	u32 logical_id = LE32_TO_CPU(cap->logical_id);

	func_p->num_allocd_vfs = number;
	func_p->vf_base_id = logical_id;
	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
		  func_p->num_allocd_vfs);
	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
		  func_p->vf_base_id);
}

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  LE32_TO_CPU(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		    void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);

	for (i = 0; i < cap_count; i++) {
		u16 cap = LE16_TO_CPU(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &func_p->common_cap,
					      &cap_resp[i], "func caps");

		switch (cap) {
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
	ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);

	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
}

/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
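 *
 * The capability's number is a bitmap of valid functions; e.g. a
 * hypothetical value of 0x5 (functions 0 and 2) yields num_funcs = 2.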
 */
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);

	dev_p->num_funcs = ice_hweight32(number);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
		  dev_p->num_funcs);
}

/**
 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VF for device capabilities.
 */
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);

	dev_p->num_vfs_exposed = number;
	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
		  dev_p->num_vfs_exposed);
}

/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 number = LE32_TO_CPU(cap->number);

	dev_p->num_vsi_allocd_to_host = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
		  dev_p->num_vsi_allocd_to_host);
}

/**
 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
 */
static void
ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			    struct ice_aqc_list_caps_elem *cap)
{
	dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
	dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
		  !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
	ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
		  dev_p->nac_topo.id);
}

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
2798 */ 2799 static void 2800 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2801 void *buf, u32 cap_count) 2802 { 2803 struct ice_aqc_list_caps_elem *cap_resp; 2804 u32 i; 2805 2806 cap_resp = (struct ice_aqc_list_caps_elem *)buf; 2807 2808 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM); 2809 2810 for (i = 0; i < cap_count; i++) { 2811 u16 cap = LE16_TO_CPU(cap_resp[i].cap); 2812 bool found; 2813 2814 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2815 &cap_resp[i], "dev caps"); 2816 2817 switch (cap) { 2818 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2819 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2820 break; 2821 case ICE_AQC_CAPS_VF: 2822 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2823 break; 2824 case ICE_AQC_CAPS_VSI: 2825 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2826 break; 2827 case ICE_AQC_CAPS_NAC_TOPOLOGY: 2828 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); 2829 break; 2830 default: 2831 /* Don't list common capabilities as unknown */ 2832 if (!found) 2833 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2834 i, cap); 2835 break; 2836 } 2837 } 2838 2839 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true); 2840 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true); 2841 2842 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2843 } 2844 2845 /** 2846 * ice_aq_list_caps - query function/device capabilities 2847 * @hw: pointer to the HW struct 2848 * @buf: a buffer to hold the capabilities 2849 * @buf_size: size of the buffer 2850 * @cap_count: if not NULL, set to the number of capabilities reported 2851 * @opc: capabilities type to discover, device or function 2852 * @cd: pointer to command details structure or NULL 2853 * 2854 * Get the function (0x000A) or device (0x000B) capabilities description from 2855 * firmware and store it in the buffer. 2856 * 2857 * If the cap_count pointer is not NULL, then it is set to the number of 2858 * capabilities firmware will report. Note that if the buffer size is too 2859 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2860 * cap_count will still be updated in this case. It is recommended that the 2861 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2862 * firmware could return) to avoid this. 2863 */ 2864 static enum ice_status 2865 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2866 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2867 { 2868 struct ice_aqc_list_caps *cmd; 2869 struct ice_aq_desc desc; 2870 enum ice_status status; 2871 2872 cmd = &desc.params.get_cap; 2873 2874 if (opc != ice_aqc_opc_list_func_caps && 2875 opc != ice_aqc_opc_list_dev_caps) 2876 return ICE_ERR_PARAM; 2877 2878 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2879 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2880 2881 if (cap_count) 2882 *cap_count = LE32_TO_CPU(cmd->count); 2883 2884 return status; 2885 } 2886 2887 /** 2888 * ice_discover_dev_caps - Read and extract device capabilities 2889 * @hw: pointer to the hardware structure 2890 * @dev_caps: pointer to device capabilities structure 2891 * 2892 * Read the device capabilities and extract them into the dev_caps structure 2893 * for later use. 
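 *
 * Assuming the 32-byte ice_aqc_list_caps_elem layout, the 4 KB
 * ICE_AQ_MAX_BUF_LEN buffer used below holds up to 4096 / 32 = 128
 * capability records.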
2894 */ 2895 static enum ice_status 2896 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2897 { 2898 enum ice_status status; 2899 u32 cap_count = 0; 2900 void *cbuf; 2901 2902 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); 2903 if (!cbuf) 2904 return ICE_ERR_NO_MEMORY; 2905 2906 /* Although the driver doesn't know the number of capabilities the 2907 * device will return, we can simply send a 4KB buffer, the maximum 2908 * possible size that firmware can return. 2909 */ 2910 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2911 2912 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2913 ice_aqc_opc_list_dev_caps, NULL); 2914 if (!status) 2915 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2916 ice_free(hw, cbuf); 2917 2918 return status; 2919 } 2920 2921 /** 2922 * ice_discover_func_caps - Read and extract function capabilities 2923 * @hw: pointer to the hardware structure 2924 * @func_caps: pointer to function capabilities structure 2925 * 2926 * Read the function capabilities and extract them into the func_caps structure 2927 * for later use. 2928 */ 2929 static enum ice_status 2930 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2931 { 2932 enum ice_status status; 2933 u32 cap_count = 0; 2934 void *cbuf; 2935 2936 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); 2937 if (!cbuf) 2938 return ICE_ERR_NO_MEMORY; 2939 2940 /* Although the driver doesn't know the number of capabilities the 2941 * device will return, we can simply send a 4KB buffer, the maximum 2942 * possible size that firmware can return. 2943 */ 2944 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2945 2946 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2947 ice_aqc_opc_list_func_caps, NULL); 2948 if (!status) 2949 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2950 ice_free(hw, cbuf); 2951 2952 return status; 2953 } 2954 2955 /** 2956 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2957 * @hw: pointer to the hardware structure 2958 */ 2959 void ice_set_safe_mode_caps(struct ice_hw *hw) 2960 { 2961 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2962 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2963 struct ice_hw_common_caps cached_caps; 2964 u32 num_funcs; 2965 2966 /* cache some func_caps values that should be restored after memset */ 2967 cached_caps = func_caps->common_cap; 2968 2969 /* unset func capabilities */ 2970 memset(func_caps, 0, sizeof(*func_caps)); 2971 2972 #define ICE_RESTORE_FUNC_CAP(name) \ 2973 func_caps->common_cap.name = cached_caps.name 2974 2975 /* restore cached values */ 2976 ICE_RESTORE_FUNC_CAP(valid_functions); 2977 ICE_RESTORE_FUNC_CAP(txq_first_id); 2978 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2979 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2980 ICE_RESTORE_FUNC_CAP(max_mtu); 2981 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2982 2983 /* one Tx and one Rx queue in safe mode */ 2984 func_caps->common_cap.num_rxq = 1; 2985 func_caps->common_cap.num_txq = 1; 2986 2987 /* two MSIX vectors, one for traffic and one for misc causes */ 2988 func_caps->common_cap.num_msix_vectors = 2; 2989 func_caps->guar_num_vsi = 1; 2990 2991 /* cache some dev_caps values that should be restored after memset */ 2992 cached_caps = dev_caps->common_cap; 2993 num_funcs = dev_caps->num_funcs; 2994 2995 /* unset dev capabilities */ 2996 memset(dev_caps, 0, sizeof(*dev_caps)); 2997 2998 #define ICE_RESTORE_DEV_CAP(name) \ 2999 dev_caps->common_cap.name 
= cached_caps.name 3000 3001 /* restore cached values */ 3002 ICE_RESTORE_DEV_CAP(valid_functions); 3003 ICE_RESTORE_DEV_CAP(txq_first_id); 3004 ICE_RESTORE_DEV_CAP(rxq_first_id); 3005 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 3006 ICE_RESTORE_DEV_CAP(max_mtu); 3007 ICE_RESTORE_DEV_CAP(nvm_unified_update); 3008 dev_caps->num_funcs = num_funcs; 3009 3010 /* one Tx and one Rx queue per function in safe mode */ 3011 dev_caps->common_cap.num_rxq = num_funcs; 3012 dev_caps->common_cap.num_txq = num_funcs; 3013 3014 /* two MSIX vectors per function */ 3015 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 3016 } 3017 3018 /** 3019 * ice_get_caps - get info about the HW 3020 * @hw: pointer to the hardware structure 3021 */ 3022 enum ice_status ice_get_caps(struct ice_hw *hw) 3023 { 3024 enum ice_status status; 3025 3026 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3027 if (status) 3028 return status; 3029 3030 return ice_discover_func_caps(hw, &hw->func_caps); 3031 } 3032 3033 /** 3034 * ice_aq_manage_mac_write - manage MAC address write command 3035 * @hw: pointer to the HW struct 3036 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3037 * @flags: flags to control write behavior 3038 * @cd: pointer to command details structure or NULL 3039 * 3040 * This function is used to write MAC address to the NVM (0x0108). 3041 */ 3042 enum ice_status 3043 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3044 struct ice_sq_cd *cd) 3045 { 3046 struct ice_aqc_manage_mac_write *cmd; 3047 struct ice_aq_desc desc; 3048 3049 cmd = &desc.params.mac_write; 3050 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3051 3052 cmd->flags = flags; 3053 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); 3054 3055 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3056 } 3057 3058 /** 3059 * ice_aq_clear_pxe_mode 3060 * @hw: pointer to the HW struct 3061 * 3062 * Tell the firmware that the driver is taking over from PXE (0x0110). 3063 */ 3064 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 3065 { 3066 struct ice_aq_desc desc; 3067 3068 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3069 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3070 3071 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3072 } 3073 3074 /** 3075 * ice_clear_pxe_mode - clear pxe operations mode 3076 * @hw: pointer to the HW struct 3077 * 3078 * Make sure all PXE mode settings are cleared, including things 3079 * like descriptor fetch/write-back mode. 3080 */ 3081 void ice_clear_pxe_mode(struct ice_hw *hw) 3082 { 3083 if (ice_check_sq_alive(hw, &hw->adminq)) 3084 ice_aq_clear_pxe_mode(hw); 3085 } 3086 3087 /** 3088 * ice_aq_set_port_params - set physical port parameters. 
3089 * @pi: pointer to the port info struct 3090 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded 3091 * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI 3092 * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded 3093 * @double_vlan: if set double VLAN is enabled 3094 * @cd: pointer to command details structure or NULL 3095 * 3096 * Set Physical port parameters (0x0203) 3097 */ 3098 enum ice_status 3099 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, 3100 bool save_bad_pac, bool pad_short_pac, bool double_vlan, 3101 struct ice_sq_cd *cd) 3102 { 3103 struct ice_aqc_set_port_params *cmd; 3104 struct ice_hw *hw = pi->hw; 3105 struct ice_aq_desc desc; 3106 u16 cmd_flags = 0; 3107 3108 cmd = &desc.params.set_port_params; 3109 3110 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3111 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); 3112 if (save_bad_pac) 3113 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS; 3114 if (pad_short_pac) 3115 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS; 3116 if (double_vlan) 3117 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3118 cmd->cmd_flags = CPU_TO_LE16(cmd_flags); 3119 3120 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3121 } 3122 3123 /** 3124 * ice_is_100m_speed_supported 3125 * @hw: pointer to the HW struct 3126 * 3127 * returns true if 100M speeds are supported by the device, 3128 * false otherwise. 3129 */ 3130 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3131 { 3132 switch (hw->device_id) { 3133 case ICE_DEV_ID_E822C_SGMII: 3134 case ICE_DEV_ID_E822L_SGMII: 3135 case ICE_DEV_ID_E823L_1GBE: 3136 case ICE_DEV_ID_E823C_SGMII: 3137 return true; 3138 default: 3139 return false; 3140 } 3141 } 3142 3143 /** 3144 * ice_get_link_speed_based_on_phy_type - returns link speed 3145 * @phy_type_low: lower part of phy_type 3146 * @phy_type_high: higher part of phy_type 3147 * 3148 * This helper function will convert an entry in PHY type structure 3149 * [phy_type_low, phy_type_high] to its corresponding link speed. 3150 * Note: In the structure of [phy_type_low, phy_type_high], there should 3151 * be one bit set, as this function will convert one PHY type to its 3152 * speed. 
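 * For example, a phy_type_low of ICE_PHY_TYPE_LOW_10GBASE_SR maps to
 * ICE_AQ_LINK_SPEED_10GB.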
3153 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3154 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3155 */ 3156 static u16 3157 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3158 { 3159 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3160 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3161 3162 switch (phy_type_low) { 3163 case ICE_PHY_TYPE_LOW_100BASE_TX: 3164 case ICE_PHY_TYPE_LOW_100M_SGMII: 3165 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3166 break; 3167 case ICE_PHY_TYPE_LOW_1000BASE_T: 3168 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3169 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3170 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3171 case ICE_PHY_TYPE_LOW_1G_SGMII: 3172 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3173 break; 3174 case ICE_PHY_TYPE_LOW_2500BASE_T: 3175 case ICE_PHY_TYPE_LOW_2500BASE_X: 3176 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3177 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3178 break; 3179 case ICE_PHY_TYPE_LOW_5GBASE_T: 3180 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3181 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3182 break; 3183 case ICE_PHY_TYPE_LOW_10GBASE_T: 3184 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3185 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3186 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3187 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3188 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3189 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3190 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3191 break; 3192 case ICE_PHY_TYPE_LOW_25GBASE_T: 3193 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3194 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3195 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3196 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3197 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3198 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3199 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3200 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3201 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3202 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3203 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3204 break; 3205 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3206 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3207 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3208 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3209 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3210 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3211 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3212 break; 3213 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3214 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3215 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3216 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3217 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3218 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3219 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3220 case ICE_PHY_TYPE_LOW_50G_AUI2: 3221 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3222 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3223 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3224 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3225 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3226 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3227 case ICE_PHY_TYPE_LOW_50G_AUI1: 3228 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3229 break; 3230 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3231 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3232 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3233 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3234 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3235 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3236 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3237 case ICE_PHY_TYPE_LOW_100G_AUI4: 3238 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3239 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3240 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 3241 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3242 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3243 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3244 
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: the format of link_speeds_bitmap is the same as
 * ice_aqc_get_link_status->link_speed. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in the [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function turns on the bits in the
 * [phy_type_low, phy_type_high] structure that correspond to the speeds
 * set in the link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = ICE_SUCCESS;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		if (status == ICE_SUCCESS)
			ice_memcpy(li->module_type, &pcaps->module_type,
				   sizeof(li->module_type),
				   ICE_NONDMA_TO_NONDMA);

		ice_free(hw, pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
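 *
 * For example, ice_cfg_phy_fc() below caches the requested flow-control
 * mode with ICE_FC_MODE, so pi->phy.curr_user_fc_req later reflects the
 * user's choice.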
3426 */ 3427 static void 3428 ice_cache_phy_user_req(struct ice_port_info *pi, 3429 struct ice_phy_cache_mode_data cache_data, 3430 enum ice_phy_cache_mode cache_mode) 3431 { 3432 if (!pi) 3433 return; 3434 3435 switch (cache_mode) { 3436 case ICE_FC_MODE: 3437 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3438 break; 3439 case ICE_SPEED_MODE: 3440 pi->phy.curr_user_speed_req = 3441 cache_data.data.curr_user_speed_req; 3442 break; 3443 case ICE_FEC_MODE: 3444 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3445 break; 3446 default: 3447 break; 3448 } 3449 } 3450 3451 /** 3452 * ice_caps_to_fc_mode 3453 * @caps: PHY capabilities 3454 * 3455 * Convert PHY FC capabilities to ice FC mode 3456 */ 3457 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3458 { 3459 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3460 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3461 return ICE_FC_FULL; 3462 3463 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3464 return ICE_FC_TX_PAUSE; 3465 3466 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3467 return ICE_FC_RX_PAUSE; 3468 3469 return ICE_FC_NONE; 3470 } 3471 3472 /** 3473 * ice_caps_to_fec_mode 3474 * @caps: PHY capabilities 3475 * @fec_options: Link FEC options 3476 * 3477 * Convert PHY FEC capabilities to ice FEC mode 3478 */ 3479 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3480 { 3481 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) { 3482 if (fec_options & ICE_AQC_PHY_FEC_DIS) 3483 return ICE_FEC_DIS_AUTO; 3484 else 3485 return ICE_FEC_AUTO; 3486 } 3487 3488 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3489 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3490 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3491 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3492 return ICE_FEC_BASER; 3493 3494 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3495 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3496 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3497 return ICE_FEC_RS; 3498 3499 return ICE_FEC_NONE; 3500 } 3501 3502 /** 3503 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3504 * @pi: port information structure 3505 * @cfg: PHY configuration data to set FC mode 3506 * @req_mode: FC mode to configure 3507 */ 3508 static enum ice_status 3509 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3510 enum ice_fc_mode req_mode) 3511 { 3512 struct ice_phy_cache_mode_data cache_data; 3513 u8 pause_mask = 0x0; 3514 3515 if (!pi || !cfg) 3516 return ICE_ERR_BAD_PTR; 3517 switch (req_mode) { 3518 case ICE_FC_AUTO: 3519 { 3520 struct ice_aqc_get_phy_caps_data *pcaps; 3521 enum ice_status status; 3522 3523 pcaps = (struct ice_aqc_get_phy_caps_data *) 3524 ice_malloc(pi->hw, sizeof(*pcaps)); 3525 if (!pcaps) 3526 return ICE_ERR_NO_MEMORY; 3527 /* Query the value of FC that both the NIC and attached media 3528 * can do. 
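		 * In ICE_FC_AUTO mode the pause bits advertised below are
		 * ANDed with these media capabilities, so the request never
		 * exceeds what the NIC and attached module support.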
3529 */ 3530 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3531 pcaps, NULL); 3532 if (status) { 3533 ice_free(pi->hw, pcaps); 3534 return status; 3535 } 3536 3537 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3538 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3539 3540 ice_free(pi->hw, pcaps); 3541 break; 3542 } 3543 case ICE_FC_FULL: 3544 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3545 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3546 break; 3547 case ICE_FC_RX_PAUSE: 3548 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3549 break; 3550 case ICE_FC_TX_PAUSE: 3551 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3552 break; 3553 default: 3554 break; 3555 } 3556 3557 /* clear the old pause settings */ 3558 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3559 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3560 3561 /* set the new capabilities */ 3562 cfg->caps |= pause_mask; 3563 3564 /* Cache user FC request */ 3565 cache_data.data.curr_user_fc_req = req_mode; 3566 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3567 3568 return ICE_SUCCESS; 3569 } 3570 3571 /** 3572 * ice_set_fc 3573 * @pi: port information structure 3574 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3575 * @ena_auto_link_update: enable automatic link update 3576 * 3577 * Set the requested flow control mode. 3578 */ 3579 enum ice_status 3580 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3581 { 3582 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3583 struct ice_aqc_get_phy_caps_data *pcaps; 3584 enum ice_status status; 3585 struct ice_hw *hw; 3586 3587 if (!pi || !aq_failures) 3588 return ICE_ERR_BAD_PTR; 3589 3590 *aq_failures = 0; 3591 hw = pi->hw; 3592 3593 pcaps = (struct ice_aqc_get_phy_caps_data *) 3594 ice_malloc(hw, sizeof(*pcaps)); 3595 if (!pcaps) 3596 return ICE_ERR_NO_MEMORY; 3597 3598 /* Get the current PHY config */ 3599 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3600 pcaps, NULL); 3601 3602 if (status) { 3603 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3604 goto out; 3605 } 3606 3607 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3608 3609 /* Configure the set PHY data */ 3610 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3611 if (status) { 3612 if (status != ICE_ERR_BAD_PTR) 3613 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3614 3615 goto out; 3616 } 3617 3618 /* If the capabilities have changed, then set the new config */ 3619 if (cfg.caps != pcaps->caps) { 3620 int retry_count, retry_max = 10; 3621 3622 /* Auto restart link so settings take effect */ 3623 if (ena_auto_link_update) 3624 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3625 3626 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3627 if (status) { 3628 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3629 goto out; 3630 } 3631 3632 /* Update the link info 3633 * It sometimes takes a really long time for link to 3634 * come back from the atomic reset. Thus, we wait a 3635 * little bit. 
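		 * With retry_max set to 10 and a 100 ms delay per attempt,
		 * this polls for roughly one second before giving up and
		 * reporting ICE_SET_FC_AQ_FAIL_UPDATE.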
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (status == ICE_SUCCESS)
				break;

			ice_msec_delay(100, true);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	ice_free(hw, pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match the PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);

	if (status)
		goto out;

	cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_DIS_AUTO:
		/* Set No FEC and auto FEC */
		if (!ice_fw_supports_fec_dis_auto(hw)) {
			/* go through out so pcaps is not leaked on the
			 * unsupported path
			 */
			status = ICE_ERR_NOT_SUPPORTED;
			goto out;
		}
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
		/* fall-through */
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
	    !ice_fw_supports_report_dflt_cfg(pi->hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	ice_free(hw, pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = ICE_SUCCESS;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
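 *
 * Illustrative only, assuming a valid port_info: to force the link up
 * and renegotiate after a PHY configuration change:
 *
 *	status = ice_aq_set_link_restart_an(pi, true, NULL);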
3850 */ 3851 enum ice_status 3852 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3853 struct ice_sq_cd *cd) 3854 { 3855 struct ice_aqc_restart_an *cmd; 3856 struct ice_aq_desc desc; 3857 3858 cmd = &desc.params.restart_an; 3859 3860 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3861 3862 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3863 cmd->lport_num = pi->lport; 3864 if (ena_link) 3865 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3866 else 3867 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3868 3869 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3870 } 3871 3872 /** 3873 * ice_aq_set_event_mask 3874 * @hw: pointer to the HW struct 3875 * @port_num: port number of the physical function 3876 * @mask: event mask to be set 3877 * @cd: pointer to command details structure or NULL 3878 * 3879 * Set event mask (0x0613) 3880 */ 3881 enum ice_status 3882 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3883 struct ice_sq_cd *cd) 3884 { 3885 struct ice_aqc_set_event_mask *cmd; 3886 struct ice_aq_desc desc; 3887 3888 cmd = &desc.params.set_event_mask; 3889 3890 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3891 3892 cmd->lport_num = port_num; 3893 3894 cmd->event_mask = CPU_TO_LE16(mask); 3895 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3896 } 3897 3898 /** 3899 * ice_aq_set_mac_loopback 3900 * @hw: pointer to the HW struct 3901 * @ena_lpbk: Enable or Disable loopback 3902 * @cd: pointer to command details structure or NULL 3903 * 3904 * Enable/disable loopback on a given port 3905 */ 3906 enum ice_status 3907 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3908 { 3909 struct ice_aqc_set_mac_lb *cmd; 3910 struct ice_aq_desc desc; 3911 3912 cmd = &desc.params.set_mac_lb; 3913 3914 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3915 if (ena_lpbk) 3916 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3917 3918 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3919 } 3920 3921 /** 3922 * ice_aq_set_port_id_led 3923 * @pi: pointer to the port information 3924 * @is_orig_mode: is this LED set to original mode (by the net-list) 3925 * @cd: pointer to command details structure or NULL 3926 * 3927 * Set LED value for the given port (0x06e9) 3928 */ 3929 enum ice_status 3930 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3931 struct ice_sq_cd *cd) 3932 { 3933 struct ice_aqc_set_port_id_led *cmd; 3934 struct ice_hw *hw = pi->hw; 3935 struct ice_aq_desc desc; 3936 3937 cmd = &desc.params.set_port_id_led; 3938 3939 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3940 3941 if (is_orig_mode) 3942 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3943 else 3944 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3945 3946 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3947 } 3948 3949 /** 3950 * ice_aq_sff_eeprom 3951 * @hw: pointer to the HW struct 3952 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3953 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3954 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3955 * @page: QSFP page 3956 * @set_page: set or ignore the page 3957 * @data: pointer to data buffer to be read/written to the I2C device. 3958 * @length: 1-16 for read, 1 for write. 3959 * @write: 0 read, 1 for write. 
3960 * @cd: pointer to command details structure or NULL 3961 * 3962 * Read/Write SFF EEPROM (0x06EE) 3963 */ 3964 enum ice_status 3965 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3966 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3967 bool write, struct ice_sq_cd *cd) 3968 { 3969 struct ice_aqc_sff_eeprom *cmd; 3970 struct ice_aq_desc desc; 3971 enum ice_status status; 3972 3973 if (!data || (mem_addr & 0xff00)) 3974 return ICE_ERR_PARAM; 3975 3976 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3977 cmd = &desc.params.read_write_sff_param; 3978 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD); 3979 cmd->lport_num = (u8)(lport & 0xff); 3980 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3981 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) & 3982 ICE_AQC_SFF_I2CBUS_7BIT_M) | 3983 ((set_page << 3984 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 3985 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 3986 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff); 3987 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 3988 if (write) 3989 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE); 3990 3991 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3992 return status; 3993 } 3994 3995 /** 3996 * ice_aq_prog_topo_dev_nvm 3997 * @hw: pointer to the hardware structure 3998 * @topo_params: pointer to structure storing topology parameters for a device 3999 * @cd: pointer to command details structure or NULL 4000 * 4001 * Program Topology Device NVM (0x06F2) 4002 * 4003 */ 4004 enum ice_status 4005 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw, 4006 struct ice_aqc_link_topo_params *topo_params, 4007 struct ice_sq_cd *cd) 4008 { 4009 struct ice_aqc_prog_topo_dev_nvm *cmd; 4010 struct ice_aq_desc desc; 4011 4012 cmd = &desc.params.prog_topo_dev_nvm; 4013 4014 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm); 4015 4016 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), 4017 ICE_NONDMA_TO_NONDMA); 4018 4019 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 4020 } 4021 4022 /** 4023 * ice_aq_read_topo_dev_nvm 4024 * @hw: pointer to the hardware structure 4025 * @topo_params: pointer to structure storing topology parameters for a device 4026 * @start_address: byte offset in the topology device NVM 4027 * @data: pointer to data buffer 4028 * @data_size: number of bytes to be read from the topology device NVM 4029 * @cd: pointer to command details structure or NULL 4030 * Read Topology Device NVM (0x06F3) 4031 * 4032 */ 4033 enum ice_status 4034 ice_aq_read_topo_dev_nvm(struct ice_hw *hw, 4035 struct ice_aqc_link_topo_params *topo_params, 4036 u32 start_address, u8 *data, u8 data_size, 4037 struct ice_sq_cd *cd) 4038 { 4039 struct ice_aqc_read_topo_dev_nvm *cmd; 4040 struct ice_aq_desc desc; 4041 enum ice_status status; 4042 4043 if (!data || data_size == 0 || 4044 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE) 4045 return ICE_ERR_PARAM; 4046 4047 cmd = &desc.params.read_topo_dev_nvm; 4048 4049 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm); 4050 4051 desc.datalen = CPU_TO_LE16(data_size); 4052 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params), 4053 ICE_NONDMA_TO_NONDMA); 4054 cmd->start_address = CPU_TO_LE32(start_address); 4055 4056 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 4057 if (status) 4058 return status; 4059 4060 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA); 4061 4062 return ICE_SUCCESS; 4063 } 4064 4065 /** 4066 * __ice_aq_get_set_rss_lut 4067 * @hw: pointer to the 
hardware structure 4068 * @params: RSS LUT parameters 4069 * @set: set true to set the table, false to get the table 4070 * 4071 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4072 */ 4073 static enum ice_status 4074 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) 4075 { 4076 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; 4077 struct ice_aqc_get_set_rss_lut *cmd_resp; 4078 struct ice_aq_desc desc; 4079 enum ice_status status; 4080 u8 *lut; 4081 4082 if (!params) 4083 return ICE_ERR_PARAM; 4084 4085 vsi_handle = params->vsi_handle; 4086 lut = params->lut; 4087 4088 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 4089 return ICE_ERR_PARAM; 4090 4091 lut_size = params->lut_size; 4092 lut_type = params->lut_type; 4093 glob_lut_idx = params->global_lut_id; 4094 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4095 4096 cmd_resp = &desc.params.get_set_rss_lut; 4097 4098 if (set) { 4099 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 4100 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4101 } else { 4102 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 4103 } 4104 4105 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << 4106 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 4107 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 4108 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 4109 4110 switch (lut_type) { 4111 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 4112 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 4113 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 4114 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 4115 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 4116 break; 4117 default: 4118 status = ICE_ERR_PARAM; 4119 goto ice_aq_get_set_rss_lut_exit; 4120 } 4121 4122 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 4123 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 4124 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 4125 4126 if (!set) 4127 goto ice_aq_get_set_rss_lut_send; 4128 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 4129 if (!set) 4130 goto ice_aq_get_set_rss_lut_send; 4131 } else { 4132 goto ice_aq_get_set_rss_lut_send; 4133 } 4134 4135 /* LUT size is only valid for Global and PF table types */ 4136 switch (lut_size) { 4137 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 4138 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG << 4139 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 4140 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 4141 break; 4142 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 4143 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 4144 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 4145 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 4146 break; 4147 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 4148 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 4149 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 4150 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 4151 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 4152 break; 4153 } 4154 /* fall-through */ 4155 default: 4156 status = ICE_ERR_PARAM; 4157 goto ice_aq_get_set_rss_lut_exit; 4158 } 4159 4160 ice_aq_get_set_rss_lut_send: 4161 cmd_resp->flags = CPU_TO_LE16(flags); 4162 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4163 4164 ice_aq_get_set_rss_lut_exit: 4165 return status; 4166 } 4167 4168 /** 4169 * ice_aq_get_rss_lut 4170 * @hw: pointer to the hardware structure 4171 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4172 * 4173 * get the RSS lookup table, PF or VSI type 4174 */ 4175 enum ice_status 4176 ice_aq_get_rss_lut(struct ice_hw *hw, struct 
ice_aq_get_set_rss_lut_params *get_params) 4177 { 4178 return __ice_aq_get_set_rss_lut(hw, get_params, false); 4179 } 4180 4181 /** 4182 * ice_aq_set_rss_lut 4183 * @hw: pointer to the hardware structure 4184 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 4185 * 4186 * set the RSS lookup table, PF or VSI type 4187 */ 4188 enum ice_status 4189 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 4190 { 4191 return __ice_aq_get_set_rss_lut(hw, set_params, true); 4192 } 4193 4194 /** 4195 * __ice_aq_get_set_rss_key 4196 * @hw: pointer to the HW struct 4197 * @vsi_id: VSI FW index 4198 * @key: pointer to key info struct 4199 * @set: set true to set the key, false to get the key 4200 * 4201 * get (0x0B04) or set (0x0B02) the RSS key per VSI 4202 */ 4203 static enum 4204 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 4205 struct ice_aqc_get_set_rss_keys *key, 4206 bool set) 4207 { 4208 struct ice_aqc_get_set_rss_key *cmd_resp; 4209 u16 key_size = sizeof(*key); 4210 struct ice_aq_desc desc; 4211 4212 cmd_resp = &desc.params.get_set_rss_key; 4213 4214 if (set) { 4215 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 4216 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4217 } else { 4218 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 4219 } 4220 4221 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << 4222 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 4223 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 4224 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 4225 4226 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 4227 } 4228 4229 /** 4230 * ice_aq_get_rss_key 4231 * @hw: pointer to the HW struct 4232 * @vsi_handle: software VSI handle 4233 * @key: pointer to key info struct 4234 * 4235 * get the RSS key per VSI 4236 */ 4237 enum ice_status 4238 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 4239 struct ice_aqc_get_set_rss_keys *key) 4240 { 4241 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 4242 return ICE_ERR_PARAM; 4243 4244 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4245 key, false); 4246 } 4247 4248 /** 4249 * ice_aq_set_rss_key 4250 * @hw: pointer to the HW struct 4251 * @vsi_handle: software VSI handle 4252 * @keys: pointer to key info struct 4253 * 4254 * set the RSS key per VSI 4255 */ 4256 enum ice_status 4257 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 4258 struct ice_aqc_get_set_rss_keys *keys) 4259 { 4260 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 4261 return ICE_ERR_PARAM; 4262 4263 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 4264 keys, true); 4265 } 4266 4267 /** 4268 * ice_aq_add_lan_txq 4269 * @hw: pointer to the hardware structure 4270 * @num_qgrps: Number of added queue groups 4271 * @qg_list: list of queue groups to be added 4272 * @buf_size: size of buffer for indirect command 4273 * @cd: pointer to command details structure or NULL 4274 * 4275 * Add Tx LAN queue (0x0C30) 4276 * 4277 * NOTE: 4278 * Prior to calling add Tx LAN queue: 4279 * Initialize the following as part of the Tx queue context: 4280 * Completion queue ID if the queue uses Completion queue, Quanta profile, 4281 * Cache profile and Packet shaper profile. 4282 * 4283 * After add Tx LAN queue AQ command is completed: 4284 * Interrupts should be associated with specific queues, 4285 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 4286 * flow. 
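 *
 * The qg_list buffer must be sized exactly: buf_size has to equal the
 * sum of ice_struct_size(list, txqs, list->num_txqs) over all groups,
 * otherwise the call fails with ICE_ERR_PARAM. Illustrative only, for a
 * single group carrying one queue:
 *
 *	buf_size = ice_struct_size(qg_list, txqs, 1);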
4287 */ 4288 enum ice_status 4289 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4290 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4291 struct ice_sq_cd *cd) 4292 { 4293 struct ice_aqc_add_tx_qgrp *list; 4294 struct ice_aqc_add_txqs *cmd; 4295 struct ice_aq_desc desc; 4296 u16 i, sum_size = 0; 4297 4298 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 4299 4300 cmd = &desc.params.add_txqs; 4301 4302 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4303 4304 if (!qg_list) 4305 return ICE_ERR_PARAM; 4306 4307 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4308 return ICE_ERR_PARAM; 4309 4310 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4311 sum_size += ice_struct_size(list, txqs, list->num_txqs); 4312 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4313 list->num_txqs); 4314 } 4315 4316 if (buf_size != sum_size) 4317 return ICE_ERR_PARAM; 4318 4319 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4320 4321 cmd->num_qgrps = num_qgrps; 4322 4323 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4324 } 4325 4326 /** 4327 * ice_aq_dis_lan_txq 4328 * @hw: pointer to the hardware structure 4329 * @num_qgrps: number of groups in the list 4330 * @qg_list: the list of groups to disable 4331 * @buf_size: the total size of the qg_list buffer in bytes 4332 * @rst_src: if called due to reset, specifies the reset source 4333 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4334 * @cd: pointer to command details structure or NULL 4335 * 4336 * Disable LAN Tx queue (0x0C31) 4337 */ 4338 static enum ice_status 4339 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4340 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4341 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4342 struct ice_sq_cd *cd) 4343 { 4344 struct ice_aqc_dis_txq_item *item; 4345 struct ice_aqc_dis_txqs *cmd; 4346 struct ice_aq_desc desc; 4347 enum ice_status status; 4348 u16 i, sz = 0; 4349 4350 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 4351 cmd = &desc.params.dis_txqs; 4352 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4353 4354 /* qg_list can be NULL only in VM/VF reset flow */ 4355 if (!qg_list && !rst_src) 4356 return ICE_ERR_PARAM; 4357 4358 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4359 return ICE_ERR_PARAM; 4360 4361 cmd->num_entries = num_qgrps; 4362 4363 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 4364 ICE_AQC_Q_DIS_TIMEOUT_M); 4365 4366 switch (rst_src) { 4367 case ICE_VM_RESET: 4368 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4369 cmd->vmvf_and_timeout |= 4370 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 4371 break; 4372 case ICE_VF_RESET: 4373 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4374 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4375 cmd->vmvf_and_timeout |= 4376 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) & 4377 ICE_AQC_Q_DIS_VMVF_NUM_M); 4378 break; 4379 case ICE_NO_RESET: 4380 default: 4381 break; 4382 } 4383 4384 /* flush pipe on time out */ 4385 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4386 /* If no queue group info, we are in a reset flow. 
Issue the AQ */ 4387 if (!qg_list) 4388 goto do_aq; 4389 4390 /* set RD bit to indicate that command buffer is provided by the driver 4391 * and it needs to be read by the firmware 4392 */ 4393 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4394 4395 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4396 u16 item_size = ice_struct_size(item, q_id, item->num_qs); 4397 4398 /* If the num of queues is even, add 2 bytes of padding */ 4399 if ((item->num_qs % 2) == 0) 4400 item_size += 2; 4401 4402 sz += item_size; 4403 4404 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4405 } 4406 4407 if (buf_size != sz) 4408 return ICE_ERR_PARAM; 4409 4410 do_aq: 4411 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4412 if (status) { 4413 if (!qg_list) 4414 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4415 vmvf_num, hw->adminq.sq_last_status); 4416 else 4417 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4418 LE16_TO_CPU(qg_list[0].q_id[0]), 4419 hw->adminq.sq_last_status); 4420 } 4421 return status; 4422 } 4423 4424 /** 4425 * ice_aq_move_recfg_lan_txq 4426 * @hw: pointer to the hardware structure 4427 * @num_qs: number of queues to move/reconfigure 4428 * @is_move: true if this operation involves node movement 4429 * @is_tc_change: true if this operation involves a TC change 4430 * @subseq_call: true if this operation is a subsequent call 4431 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN 4432 * @timeout: timeout in units of 100 usec (valid values 0-50) 4433 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN 4434 * @buf: struct containing src/dest TEID and per-queue info 4435 * @buf_size: size of buffer for indirect command 4436 * @txqs_moved: out param, number of queues successfully moved 4437 * @cd: pointer to command details structure or NULL 4438 * 4439 * Move / Reconfigure Tx LAN queues (0x0C32) 4440 */ 4441 enum ice_status 4442 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, 4443 bool is_tc_change, bool subseq_call, bool flush_pipe, 4444 u8 timeout, u32 *blocked_cgds, 4445 struct ice_aqc_move_txqs_data *buf, u16 buf_size, 4446 u8 *txqs_moved, struct ice_sq_cd *cd) 4447 { 4448 struct ice_aqc_move_txqs *cmd; 4449 struct ice_aq_desc desc; 4450 enum ice_status status; 4451 4452 cmd = &desc.params.move_txqs; 4453 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); 4454 4455 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 4456 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) 4457 return ICE_ERR_PARAM; 4458 4459 if (is_tc_change && !flush_pipe && !blocked_cgds) 4460 return ICE_ERR_PARAM; 4461 4462 if (!is_move && !is_tc_change) 4463 return ICE_ERR_PARAM; 4464 4465 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4466 4467 if (is_move) 4468 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE; 4469 4470 if (is_tc_change) 4471 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE; 4472 4473 if (subseq_call) 4474 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL; 4475 4476 if (flush_pipe) 4477 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE; 4478 4479 cmd->num_qs = num_qs; 4480 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) & 4481 ICE_AQC_Q_CMD_TIMEOUT_M); 4482 4483 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4484 4485 if (!status && txqs_moved) 4486 *txqs_moved = cmd->num_qs; 4487 4488 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN && 4489 is_tc_change && !flush_pipe) 4490 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds); 4491 4492 return status; 4493 } 4494 4495 /** 4496 * ice_aq_add_rdma_qsets 4497 * 
@hw: pointer to the hardware structure 4498 * @num_qset_grps: Number of RDMA Qset groups 4499 * @qset_list: list of qset groups to be added 4500 * @buf_size: size of buffer for indirect command 4501 * @cd: pointer to command details structure or NULL 4502 * 4503 * Add Tx RDMA Qsets (0x0C33) 4504 */ 4505 enum ice_status 4506 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4507 struct ice_aqc_add_rdma_qset_data *qset_list, 4508 u16 buf_size, struct ice_sq_cd *cd) 4509 { 4510 struct ice_aqc_add_rdma_qset_data *list; 4511 struct ice_aqc_add_rdma_qset *cmd; 4512 struct ice_aq_desc desc; 4513 u16 i, sum_size = 0; 4514 4515 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 4516 4517 cmd = &desc.params.add_rdma_qset; 4518 4519 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4520 4521 if (!qset_list) 4522 return ICE_ERR_PARAM; 4523 4524 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4525 return ICE_ERR_PARAM; 4526 4527 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4528 u16 num_qsets = LE16_TO_CPU(list->num_qsets); 4529 4530 sum_size += ice_struct_size(list, rdma_qsets, num_qsets); 4531 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4532 num_qsets); 4533 } 4534 4535 if (buf_size != sum_size) 4536 return ICE_ERR_PARAM; 4537 4538 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4539 4540 cmd->num_qset_grps = num_qset_grps; 4541 4542 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4543 } 4544 4545 /* End of FW Admin Queue command wrappers */ 4546 4547 /** 4548 * ice_write_byte - write a byte to a packed context structure 4549 * @src_ctx: the context structure to read from 4550 * @dest_ctx: the context to be written to 4551 * @ce_info: a description of the struct to be filled 4552 */ 4553 static void 4554 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4555 { 4556 u8 src_byte, dest_byte, mask; 4557 u8 *from, *dest; 4558 u16 shift_width; 4559 4560 /* copy from the next struct field */ 4561 from = src_ctx + ce_info->offset; 4562 4563 /* prepare the bits and mask */ 4564 shift_width = ce_info->lsb % 8; 4565 mask = (u8)(BIT(ce_info->width) - 1); 4566 4567 src_byte = *from; 4568 src_byte &= mask; 4569 4570 /* shift to correct alignment */ 4571 mask <<= shift_width; 4572 src_byte <<= shift_width; 4573 4574 /* get the current bits from the target bit string */ 4575 dest = dest_ctx + (ce_info->lsb / 8); 4576 4577 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA); 4578 4579 dest_byte &= ~mask; /* get the bits not changing */ 4580 dest_byte |= src_byte; /* add in the new bits */ 4581 4582 /* put it all back */ 4583 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); 4584 } 4585 4586 /** 4587 * ice_write_word - write a word to a packed context structure 4588 * @src_ctx: the context structure to read from 4589 * @dest_ctx: the context to be written to 4590 * @ce_info: a description of the struct to be filled 4591 */ 4592 static void 4593 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4594 { 4595 u16 src_word, mask; 4596 __le16 dest_word; 4597 u8 *from, *dest; 4598 u16 shift_width; 4599 4600 /* copy from the next struct field */ 4601 from = src_ctx + ce_info->offset; 4602 4603 /* prepare the bits and mask */ 4604 shift_width = ce_info->lsb % 8; 4605 mask = BIT(ce_info->width) - 1; 4606 4607 /* don't swizzle the bits until after the mask because the mask bits 4608 * will be in a different bit position on big endian machines 4609 */ 4610 src_word = *(u16 *)from; 4611 
src_word &= mask; 4612 4613 /* shift to correct alignment */ 4614 mask <<= shift_width; 4615 src_word <<= shift_width; 4616 4617 /* get the current bits from the target bit string */ 4618 dest = dest_ctx + (ce_info->lsb / 8); 4619 4620 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA); 4621 4622 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ 4623 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ 4624 4625 /* put it all back */ 4626 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); 4627 } 4628 4629 /** 4630 * ice_write_dword - write a dword to a packed context structure 4631 * @src_ctx: the context structure to read from 4632 * @dest_ctx: the context to be written to 4633 * @ce_info: a description of the struct to be filled 4634 */ 4635 static void 4636 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4637 { 4638 u32 src_dword, mask; 4639 __le32 dest_dword; 4640 u8 *from, *dest; 4641 u16 shift_width; 4642 4643 /* copy from the next struct field */ 4644 from = src_ctx + ce_info->offset; 4645 4646 /* prepare the bits and mask */ 4647 shift_width = ce_info->lsb % 8; 4648 4649 /* if the field width is exactly 32 on an x86 machine, then the shift 4650 * operation will not work because the SHL instructions count is masked 4651 * to 5 bits so the shift will do nothing 4652 */ 4653 if (ce_info->width < 32) 4654 mask = BIT(ce_info->width) - 1; 4655 else 4656 mask = (u32)~0; 4657 4658 /* don't swizzle the bits until after the mask because the mask bits 4659 * will be in a different bit position on big endian machines 4660 */ 4661 src_dword = *(u32 *)from; 4662 src_dword &= mask; 4663 4664 /* shift to correct alignment */ 4665 mask <<= shift_width; 4666 src_dword <<= shift_width; 4667 4668 /* get the current bits from the target bit string */ 4669 dest = dest_ctx + (ce_info->lsb / 8); 4670 4671 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA); 4672 4673 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ 4674 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ 4675 4676 /* put it all back */ 4677 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); 4678 } 4679 4680 /** 4681 * ice_write_qword - write a qword to a packed context structure 4682 * @src_ctx: the context structure to read from 4683 * @dest_ctx: the context to be written to 4684 * @ce_info: a description of the struct to be filled 4685 */ 4686 static void 4687 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4688 { 4689 u64 src_qword, mask; 4690 __le64 dest_qword; 4691 u8 *from, *dest; 4692 u16 shift_width; 4693 4694 /* copy from the next struct field */ 4695 from = src_ctx + ce_info->offset; 4696 4697 /* prepare the bits and mask */ 4698 shift_width = ce_info->lsb % 8; 4699 4700 /* if the field width is exactly 64 on an x86 machine, then the shift 4701 * operation will not work because the SHL instructions count is masked 4702 * to 6 bits so the shift will do nothing 4703 */ 4704 if (ce_info->width < 64) 4705 mask = BIT_ULL(ce_info->width) - 1; 4706 else 4707 mask = (u64)~0; 4708 4709 /* don't swizzle the bits until after the mask because the mask bits 4710 * will be in a different bit position on big endian machines 4711 */ 4712 src_qword = *(u64 *)from; 4713 src_qword &= mask; 4714 4715 /* shift to correct alignment */ 4716 mask <<= shift_width; 4717 src_qword <<= shift_width; 4718 4719 /* get the current bits from the target bit string */ 4720 dest = 
dest_ctx + (ce_info->lsb / 8); 4721 4722 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA); 4723 4724 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ 4725 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ 4726 4727 /* put it all back */ 4728 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); 4729 } 4730 4731 /** 4732 * ice_set_ctx - set context bits in packed structure 4733 * @hw: pointer to the hardware structure 4734 * @src_ctx: pointer to a generic non-packed context structure 4735 * @dest_ctx: pointer to memory for the packed structure 4736 * @ce_info: a description of the structure to be transformed 4737 */ 4738 enum ice_status 4739 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4740 const struct ice_ctx_ele *ce_info) 4741 { 4742 int f; 4743 4744 for (f = 0; ce_info[f].width; f++) { 4745 /* We have to deal with each element of the FW response 4746 * using the correct size so that we are correct regardless 4747 * of the endianness of the machine. 4748 */ 4749 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4750 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", 4751 f, ce_info[f].width, ce_info[f].size_of); 4752 continue; 4753 } 4754 switch (ce_info[f].size_of) { 4755 case sizeof(u8): 4756 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4757 break; 4758 case sizeof(u16): 4759 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4760 break; 4761 case sizeof(u32): 4762 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4763 break; 4764 case sizeof(u64): 4765 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4766 break; 4767 default: 4768 return ICE_ERR_INVAL_SIZE; 4769 } 4770 } 4771 4772 return ICE_SUCCESS; 4773 } 4774 4775 /** 4776 * ice_aq_get_internal_data 4777 * @hw: pointer to the hardware structure 4778 * @cluster_id: specific cluster to dump 4779 * @table_id: table ID within cluster 4780 * @start: index of line in the block to read 4781 * @buf: dump buffer 4782 * @buf_size: dump buffer size 4783 * @ret_buf_size: return buffer size (returned by FW) 4784 * @ret_next_table: next block to read (returned by FW) 4785 * @ret_next_index: next index to read (returned by FW) 4786 * @cd: pointer to command details structure 4787 * 4788 * Get internal FW/HW data (0xFF08) for debug purposes. 
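 *
 * A minimal sketch, assuming a caller-provided buffer and IDs: dump one
 * block and pick up the firmware's continuation cookie for a follow-up
 * call:
 *
 *	u16 ret_size, next_table;
 *	u32 next_index;
 *
 *	status = ice_aq_get_internal_data(hw, cluster_id, table_id, 0, buf,
 *					  buf_size, &ret_size, &next_table,
 *					  &next_index, NULL);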
4789 */ 4790 enum ice_status 4791 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id, 4792 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size, 4793 u16 *ret_next_table, u32 *ret_next_index, 4794 struct ice_sq_cd *cd) 4795 { 4796 struct ice_aqc_debug_dump_internals *cmd; 4797 struct ice_aq_desc desc; 4798 enum ice_status status; 4799 4800 cmd = &desc.params.debug_dump; 4801 4802 if (buf_size == 0 || !buf) 4803 return ICE_ERR_PARAM; 4804 4805 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals); 4806 4807 cmd->cluster_id = cluster_id; 4808 cmd->table_id = CPU_TO_LE16(table_id); 4809 cmd->idx = CPU_TO_LE32(start); 4810 4811 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4812 4813 if (!status) { 4814 if (ret_buf_size) 4815 *ret_buf_size = LE16_TO_CPU(desc.datalen); 4816 if (ret_next_table) 4817 *ret_next_table = LE16_TO_CPU(cmd->table_id); 4818 if (ret_next_index) 4819 *ret_next_index = LE32_TO_CPU(cmd->idx); 4820 } 4821 4822 return status; 4823 } 4824 4825 /** 4826 * ice_read_byte - read context byte into struct 4827 * @src_ctx: the context structure to read from 4828 * @dest_ctx: the context to be written to 4829 * @ce_info: a description of the struct to be filled 4830 */ 4831 static void 4832 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4833 { 4834 u8 dest_byte, mask; 4835 u8 *src, *target; 4836 u16 shift_width; 4837 4838 /* prepare the bits and mask */ 4839 shift_width = ce_info->lsb % 8; 4840 mask = (u8)(BIT(ce_info->width) - 1); 4841 4842 /* shift to correct alignment */ 4843 mask <<= shift_width; 4844 4845 /* get the current bits from the src bit string */ 4846 src = src_ctx + (ce_info->lsb / 8); 4847 4848 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); 4849 4850 dest_byte &= mask; 4851 4852 dest_byte >>= shift_width; 4853 4854 /* get the address from the struct field */ 4855 target = dest_ctx + ce_info->offset; 4856 4857 /* put it back in the struct */ 4858 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); 4859 } 4860 4861 /** 4862 * ice_read_word - read context word into struct 4863 * @src_ctx: the context structure to read from 4864 * @dest_ctx: the context to be written to 4865 * @ce_info: a description of the struct to be filled 4866 */ 4867 static void 4868 ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4869 { 4870 u16 dest_word, mask; 4871 u8 *src, *target; 4872 __le16 src_word; 4873 u16 shift_width; 4874 4875 /* prepare the bits and mask */ 4876 shift_width = ce_info->lsb % 8; 4877 mask = BIT(ce_info->width) - 1; 4878 4879 /* shift to correct alignment */ 4880 mask <<= shift_width; 4881 4882 /* get the current bits from the src bit string */ 4883 src = src_ctx + (ce_info->lsb / 8); 4884 4885 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA); 4886 4887 /* the data in the memory is stored as little endian so mask it 4888 * correctly 4889 */ 4890 src_word &= CPU_TO_LE16(mask); 4891 4892 /* get the data back into host order before shifting */ 4893 dest_word = LE16_TO_CPU(src_word); 4894 4895 dest_word >>= shift_width; 4896 4897 /* get the address from the struct field */ 4898 target = dest_ctx + ce_info->offset; 4899 4900 /* put it back in the struct */ 4901 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); 4902 } 4903 4904 /** 4905 * ice_read_dword - read context dword into struct 4906 * @src_ctx: the context structure to read from 4907 * @dest_ctx: the context to be written to 4908 * @ce_info: a 
description of the struct to be filled 4909 */ 4910 static void 4911 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4912 { 4913 u32 dest_dword, mask; 4914 __le32 src_dword; 4915 u8 *src, *target; 4916 u16 shift_width; 4917 4918 /* prepare the bits and mask */ 4919 shift_width = ce_info->lsb % 8; 4920 4921 /* if the field width is exactly 32 on an x86 machine, then the shift 4922 * operation will not work because the SHL instructions count is masked 4923 * to 5 bits so the shift will do nothing 4924 */ 4925 if (ce_info->width < 32) 4926 mask = BIT(ce_info->width) - 1; 4927 else 4928 mask = (u32)~0; 4929 4930 /* shift to correct alignment */ 4931 mask <<= shift_width; 4932 4933 /* get the current bits from the src bit string */ 4934 src = src_ctx + (ce_info->lsb / 8); 4935 4936 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA); 4937 4938 /* the data in the memory is stored as little endian so mask it 4939 * correctly 4940 */ 4941 src_dword &= CPU_TO_LE32(mask); 4942 4943 /* get the data back into host order before shifting */ 4944 dest_dword = LE32_TO_CPU(src_dword); 4945 4946 dest_dword >>= shift_width; 4947 4948 /* get the address from the struct field */ 4949 target = dest_ctx + ce_info->offset; 4950 4951 /* put it back in the struct */ 4952 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); 4953 } 4954 4955 /** 4956 * ice_read_qword - read context qword into struct 4957 * @src_ctx: the context structure to read from 4958 * @dest_ctx: the context to be written to 4959 * @ce_info: a description of the struct to be filled 4960 */ 4961 static void 4962 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4963 { 4964 u64 dest_qword, mask; 4965 __le64 src_qword; 4966 u8 *src, *target; 4967 u16 shift_width; 4968 4969 /* prepare the bits and mask */ 4970 shift_width = ce_info->lsb % 8; 4971 4972 /* if the field width is exactly 64 on an x86 machine, then the shift 4973 * operation will not work because the SHL instructions count is masked 4974 * to 6 bits so the shift will do nothing 4975 */ 4976 if (ce_info->width < 64) 4977 mask = BIT_ULL(ce_info->width) - 1; 4978 else 4979 mask = (u64)~0; 4980 4981 /* shift to correct alignment */ 4982 mask <<= shift_width; 4983 4984 /* get the current bits from the src bit string */ 4985 src = src_ctx + (ce_info->lsb / 8); 4986 4987 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA); 4988 4989 /* the data in the memory is stored as little endian so mask it 4990 * correctly 4991 */ 4992 src_qword &= CPU_TO_LE64(mask); 4993 4994 /* get the data back into host order before shifting */ 4995 dest_qword = LE64_TO_CPU(src_qword); 4996 4997 dest_qword >>= shift_width; 4998 4999 /* get the address from the struct field */ 5000 target = dest_ctx + ce_info->offset; 5001 5002 /* put it back in the struct */ 5003 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); 5004 } 5005 5006 /** 5007 * ice_get_ctx - extract context bits from a packed structure 5008 * @src_ctx: pointer to a generic packed context structure 5009 * @dest_ctx: pointer to a generic non-packed context structure 5010 * @ce_info: a description of the structure to be read from 5011 */ 5012 enum ice_status 5013 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 5014 { 5015 int f; 5016 5017 for (f = 0; ce_info[f].width; f++) { 5018 switch (ce_info[f].size_of) { 5019 case 1: 5020 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]); 5021 break; 5022 case 2: 5023 
			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 4:
			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 8:
			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated
	 *   by Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
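	 * (Leaving buf->txqs[0].info.generic at zero, as done below, thus
	 * selects BPS scheduling, sibling priority 0, WFQ, and no PSM
	 * credit adjustment.)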
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  LE16_TO_CPU(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = ice_struct_size(qg_list, q_id, 1);
	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
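
		/* One disable request per queue: point the single-entry
		 * qg_list at this queue's parent, ask firmware to tear the
		 * queue down, and free the scheduler node only on success.
		 */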
qg_list->parent_teid = node->info.parent_teid; 5235 qg_list->num_qs = 1; 5236 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]); 5237 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 5238 vmvf_num, cd); 5239 5240 if (status != ICE_SUCCESS) 5241 break; 5242 ice_free_sched_node(pi, node); 5243 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 5244 } 5245 ice_release_lock(&pi->sched_lock); 5246 ice_free(hw, qg_list); 5247 return status; 5248 } 5249 5250 /** 5251 * ice_cfg_vsi_qs - configure the new/existing VSI queues 5252 * @pi: port information structure 5253 * @vsi_handle: software VSI handle 5254 * @tc_bitmap: TC bitmap 5255 * @maxqs: max queues array per TC 5256 * @owner: LAN or RDMA 5257 * 5258 * This function adds/updates the VSI queues per TC. 5259 */ 5260 static enum ice_status 5261 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 5262 u16 *maxqs, u8 owner) 5263 { 5264 enum ice_status status = ICE_SUCCESS; 5265 u8 i; 5266 5267 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 5268 return ICE_ERR_CFG; 5269 5270 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 5271 return ICE_ERR_PARAM; 5272 5273 ice_acquire_lock(&pi->sched_lock); 5274 5275 ice_for_each_traffic_class(i) { 5276 /* configuration is possible only if TC node is present */ 5277 if (!ice_sched_get_tc_node(pi, i)) 5278 continue; 5279 5280 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 5281 ice_is_tc_ena(tc_bitmap, i)); 5282 if (status) 5283 break; 5284 } 5285 5286 ice_release_lock(&pi->sched_lock); 5287 return status; 5288 } 5289 5290 /** 5291 * ice_cfg_vsi_lan - configure VSI LAN queues 5292 * @pi: port information structure 5293 * @vsi_handle: software VSI handle 5294 * @tc_bitmap: TC bitmap 5295 * @max_lanqs: max LAN queues array per TC 5296 * 5297 * This function adds/updates the VSI LAN queues per TC. 5298 */ 5299 enum ice_status 5300 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 5301 u16 *max_lanqs) 5302 { 5303 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 5304 ICE_SCHED_NODE_OWNER_LAN); 5305 } 5306 5307 /** 5308 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 5309 * @pi: port information structure 5310 * @vsi_handle: software VSI handle 5311 * @tc_bitmap: TC bitmap 5312 * @max_rdmaqs: max RDMA queues array per TC 5313 * 5314 * This function adds/updates the VSI RDMA queues per TC. 
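 *
 * Illustrative only, enabling TC 0 with a caller-chosen qset count:
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	max_rdmaqs[0] = num_rdma_qsets;
 *	status = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);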
 */
enum ice_status
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		 u16 *max_rdmaqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
			      ICE_SCHED_NODE_OWNER_RDMA);
}

/**
 * ice_ena_vsi_rdma_qset
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @rdma_qset: pointer to the RDMA qset IDs
 * @num_qsets: number of RDMA qsets
 * @qset_teid: pointer to qset node teids
 *
 * This function adds RDMA qsets
 */
enum ice_status
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
		      u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_aqc_add_rdma_qset_data *buf;
	struct ice_sched_node *parent;
	enum ice_status status;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;
	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
	buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
	if (!buf)
		return ICE_ERR_NO_MEMORY;
	ice_acquire_lock(&pi->sched_lock);

	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_RDMA);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto rdma_error_exit;
	}
	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;

	buf->num_qsets = CPU_TO_LE16(num_qsets);
	for (i = 0; i < num_qsets; i++) {
		buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
		buf->rdma_qsets[i].info.valid_sections =
			ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
			ICE_AQC_ELEM_VALID_EIR;
		buf->rdma_qsets[i].info.generic = 0;
		buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.cir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
		buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
			CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
		buf->rdma_qsets[i].info.eir_bw.bw_alloc =
			CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
	}
	status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
		goto rdma_error_exit;
	}
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	for (i = 0; i < num_qsets; i++) {
		node.node_teid = buf->rdma_qsets[i].qset_teid;
		status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
					    &node);
		if (status)
			break;
		qset_teid[i] = LE32_TO_CPU(node.node_teid);
	}
rdma_error_exit:
	ice_release_lock(&pi->sched_lock);
	ice_free(hw, buf);
	return status;
}

/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA qsets to free
 * @qset_teid: TEIDs of the qset nodes
 * @q_id: list of queue IDs being disabled
 */
enum ice_status
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;
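
	/* A single-element disable buffer is allocated once and reused for
	 * each qset in the loop below; every qset is torn down individually.
	 */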
	qg_size = ice_struct_size(qg_list, q_id, 1);
	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			CPU_TO_LE16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	ice_release_lock(&pi->sched_lock);
	ice_free(hw, qg_list);
	return status;
}

/**
 * ice_is_main_vsi - checks whether the VSI is the main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * a given PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_replay_pre_init - replay pre-initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	enum ice_status status;
	u8 i;

	/* Delete old entries from the replay filter list head if there are
	 * any
	 */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	status = ice_sched_replay_root_node_bw(hw->port_info);
	if (status)
		return status;

	return ice_sched_replay_tc_node_bw(hw->port_info);
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
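 * Deletes the filter replay lists that were set aside by
 * ice_replay_pre_init() and replays the scheduler aggregator settings.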
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from the replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the first
	 * read without adding to the statistic value so that we report stats
	 * which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
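 * Since the counters saturate rather than wrap, leaving the register
 * uncleared would silently stop counting once either field reaches 0xFFFF.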
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}

/**
 * ice_aq_alternate_write
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be written
 * @reg_val0: value to be written under 'reg_addr0'
 * @reg_addr1: address of second dword to be written
 * @reg_val1: value to be written under 'reg_addr1'
 *
 * Write one or two dwords to the alternate structure. Fields are indicated
 * by the 'reg_addr0' and 'reg_addr1' register numbers.
 */
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
		       u32 reg_addr1, u32 reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
	cmd->dword0_value = CPU_TO_LE32(reg_val0);
	cmd->dword1_value = CPU_TO_LE32(reg_val1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_aq_alternate_read
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be read
 * @reg_val0: pointer for data read from 'reg_addr0'
 * @reg_addr1: address of second dword to be read
 * @reg_val1: pointer for data read from 'reg_addr1'
 *
 * Read one or two dwords from the alternate structure. Fields are indicated
 * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
 * pointer is not passed, then only the register at 'reg_addr0' is read.
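 *
 * A minimal calling sketch (the register addresses are hypothetical):
 *
 *	u32 val0, val1;
 *
 *	status = ice_aq_alternate_read(hw, 0x10, &val0, 0x14, &val1);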
 */
enum ice_status
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
		      u32 reg_addr1, u32 *reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	if (!reg_val0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	if (status == ICE_SUCCESS) {
		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);

		if (reg_val1)
			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
	}

	return status;
}

/**
 * ice_aq_alternate_write_done
 * @hw: pointer to the HW structure
 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
 * @reset_needed: indicates whether the SW should trigger a GLOBAL reset
 *
 * Indicates to the FW that alternate structures have been changed.
 */
enum ice_status
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
{
	struct ice_aqc_done_alt_write *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.done_alt_write;

	if (!reset_needed)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
	cmd->flags = bios_mode;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*reset_needed = (LE16_TO_CPU(cmd->flags) &
				 ICE_AQC_RESP_RESET_NEEDED) != 0;

	return status;
}

/**
 * ice_aq_alternate_clear
 * @hw: pointer to the HW structure
 *
 * Clear the alternate structures of the port from which the function
 * is called.
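 * No command-specific fields are set; FW infers the port from the
 * function that issues the AQ call.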
 */
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to hold the element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}

/**
 * ice_get_cur_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the current LLDP persistent status
 */
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	enum ice_status ret;
	__le32 raw_data;
	u32 data, mask;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
			      false, true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

	ice_release_nvm(hw);

	return ret;
}

/**
 * ice_get_dflt_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the default LLDP persistent status
 */
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	u32 data, mask, loc_data, loc_data_tmp;
	enum ice_status ret;
	__le16 loc_raw_data;
	__le32 raw_data;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	/* Read the offset of EMP_SR_PTR */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
			      &loc_raw_data, false, true, NULL);
	if (ret)
		goto exit;

	loc_data = LE16_TO_CPU(loc_raw_data);
	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
	} else {
		loc_data *= ICE_AQC_NVM_WORD_UNIT;
	}

	/* Read the offset of the LLDP configuration pointer */
	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
			      false, true, NULL);
	if (ret)
		goto exit;

	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
	loc_data += loc_data_tmp;

	/* We need to skip the LLDP configuration section length (2 bytes) */
	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;

	/* Read the LLDP default configuration */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
			      true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

exit:
	ice_release_nvm(hw);

	return ret;
}

/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] - data
 *	    offset size, bit [4] - I2C address type, bits [3:0] - data size
 *	    to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	enum ice_status status;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return ICE_ERR_PARAM;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data
 *	    size to write (0-4 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 */
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 i, data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	/* data_size limited to 4 */
	if (data_size > 4)
		return ICE_ERR_PARAM;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	for (i = 0; i < data_size; i++) {
		cmd->i2c_data[i] = *data;
		data++;
	}

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be set
 * @value: SW provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
enum ice_status
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
enum ice_status
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return ICE_SUCCESS;
}

/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given version
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_is_fw_min_ver
 * @hw: pointer to the hardware structure
 * @branch: branch version
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware is at least the given version
 */
static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
			      u8 patch)
{
	if (hw->fw_branch == branch) {
		if (hw->fw_maj_ver > maj)
			return true;
		if (hw->fw_maj_ver == maj) {
			if (hw->fw_min_ver > min)
				return true;
			if (hw->fw_min_ver == min && hw->fw_patch >= patch)
				return true;
		}
	} else if (hw->fw_branch > branch) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low words.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high words.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_is_fw_health_report_supported
 * @hw: pointer to the hardware structure
 *
 * Return true if firmware supports health status reports,
 * false otherwise
 */
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
		return true;

	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
			return true;
	}

	return false;
}

/**
 * ice_aq_set_health_status_config - Configure FW health events
 * @hw: pointer to the HW struct
 * @event_source: type of diagnostic events to enable
 * @cd: pointer to command details structure or NULL
 *
 * Configure the health status event types that the firmware will send to this
 * PF. The supported event types are: PF-specific, all PFs, and global.
 */
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
				struct ice_sq_cd *cd)
{
	struct ice_aqc_set_health_status_config *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_health_status_config;

	ice_fill_dflt_direct_cmd_desc(&desc,
				      ice_aqc_opc_set_health_status_config);

	cmd->event_source = event_source;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the hw struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *		  output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; it must be true when the PF owns more than 1 port
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
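 *
 * A minimal calling sketch (illustrative only; the PF-owned port is used,
 * so lport is ignored):
 *
 *	struct ice_aqc_get_port_options_elem opts[ICE_AQC_PORT_OPT_COUNT_M];
 *	u8 count = ICE_AQC_PORT_OPT_COUNT_M;
 *	u8 active_idx, pending_idx;
 *	bool active_valid, pending_valid;
 *
 *	status = ice_aq_get_port_options(hw, opts, &count, 0, false,
 *					 &active_idx, &active_valid,
 *					 &pending_idx, &pending_valid);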
 */
enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return ICE_ERR_PARAM;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status != ICE_SUCCESS)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
	if (*active_option_valid) {
		*active_option_idx = cmd->port_options &
			ICE_AQC_PORT_OPT_ACTIVE_M;
		if (*active_option_idx > (*option_count - 1))
			return ICE_ERR_OUT_OF_RANGE;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = cmd->pending_port_option_status &
		ICE_AQC_PENDING_PORT_OPT_VALID;
	if (*pending_option_valid) {
		*pending_option_idx = cmd->pending_port_option_status &
			ICE_AQC_PENDING_PORT_OPT_IDX_M;
		if (*pending_option_idx > (*option_count - 1))
			return ICE_ERR_OUT_OF_RANGE;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return ICE_SUCCESS;
}

/**
 * ice_aq_set_port_option
 * @hw: pointer to the hw struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; it must be true when the PF owns more than 1 port
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
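 *
 * A minimal calling sketch (the option index is illustrative only): select
 * option 2 on the PF-owned port:
 *
 *	status = ice_aq_set_port_option(hw, 0, 0, 2);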
 */
enum ice_status
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer to store the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: true to add a filter, false to remove it
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = CPU_TO_LE16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* each of the indexes into the following array matches the speed of a return
 * value from the list of AQ returned speeds in the range
 * ICE_AQ_LINK_SPEED_10MB .. ICE_AQ_LINK_SPEED_100GB, excluding
 * ICE_AQ_LINK_SPEED_UNKNOWN which is BIT(15). The array is defined as 15
 * elements long because the link_speed returned by the firmware is a 16 bit
 * value, but is indexed by [fls(speed) - 1]
 */
static const u32 ice_aq_to_link_speed[15] = {
	ICE_LINK_SPEED_10MBPS,	/* BIT(0) */
	ICE_LINK_SPEED_100MBPS,
	ICE_LINK_SPEED_1000MBPS,
	ICE_LINK_SPEED_2500MBPS,
	ICE_LINK_SPEED_5000MBPS,
	ICE_LINK_SPEED_10000MBPS,
	ICE_LINK_SPEED_20000MBPS,
	ICE_LINK_SPEED_25000MBPS,
	ICE_LINK_SPEED_40000MBPS,
	ICE_LINK_SPEED_50000MBPS,
	ICE_LINK_SPEED_100000MBPS,	/* BIT(10) */
	ICE_LINK_SPEED_UNKNOWN,
	ICE_LINK_SPEED_UNKNOWN,
	ICE_LINK_SPEED_UNKNOWN,
	ICE_LINK_SPEED_UNKNOWN	/* BIT(14) */
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	/* guard against an out-of-range index (e.g. fls of BIT(15)) */
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return ICE_LINK_SPEED_UNKNOWN;

	return ice_aq_to_link_speed[index];
}

/**
 * ice_fw_supports_fec_dis_auto
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports FEC disable in Auto FEC mode
 */
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
	return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
				 ICE_FW_FEC_DIS_AUTO_MAJ,
				 ICE_FW_FEC_DIS_AUTO_MIN,
				 ICE_FW_FEC_DIS_AUTO_PATCH);
}

/**
 * ice_is_fw_auto_drop_supported
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports the auto drop feature
 */
bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
{
	/* compare the major version first so that a newer major release
	 * with a smaller minor number still qualifies
	 */
	if (hw->api_maj_ver > ICE_FW_API_AUTO_DROP_MAJ)
		return true;
	if (hw->api_maj_ver == ICE_FW_API_AUTO_DROP_MAJ &&
	    hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)
		return true;
	return false;
}
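
/*
 * Usage sketch for the ice_stat_update40()/ice_stat_update32() helpers above
 * (illustrative only; the register macro and the stats fields shown are
 * examples, not a statement about which counters a driver must track):
 *
 *	ice_stat_update40(hw, GLV_GORCL(vsi_num), offsets_loaded,
 *			  &prev_stats->rx_bytes, &cur_stats->rx_bytes);
 *
 * With a 40 bit counter, roll-over is handled by the
 * (new_data + BIT_ULL(40)) - *prev_stat branch: e.g. a previous value of
 * 0xFFFFFFFFF0 and a new value of 0x10 yield a delta of 0x20.
 */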