/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2023, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	500

static const char * const ice_link_mode_str_low[] = {
	ice_arr_elem_idx(0, "100BASE_TX"),
	ice_arr_elem_idx(1, "100M_SGMII"),
	ice_arr_elem_idx(2, "1000BASE_T"),
	ice_arr_elem_idx(3, "1000BASE_SX"),
	ice_arr_elem_idx(4, "1000BASE_LX"),
	ice_arr_elem_idx(5, "1000BASE_KX"),
	ice_arr_elem_idx(6, "1G_SGMII"),
	ice_arr_elem_idx(7, "2500BASE_T"),
	ice_arr_elem_idx(8, "2500BASE_X"),
	ice_arr_elem_idx(9, "2500BASE_KX"),
	ice_arr_elem_idx(10, "5GBASE_T"),
	ice_arr_elem_idx(11, "5GBASE_KR"),
	ice_arr_elem_idx(12, "10GBASE_T"),
	ice_arr_elem_idx(13, "10G_SFI_DA"),
	ice_arr_elem_idx(14, "10GBASE_SR"),
	ice_arr_elem_idx(15, "10GBASE_LR"),
	ice_arr_elem_idx(16, "10GBASE_KR_CR1"),
	ice_arr_elem_idx(17, "10G_SFI_AOC_ACC"),
	ice_arr_elem_idx(18, "10G_SFI_C2C"),
	ice_arr_elem_idx(19, "25GBASE_T"),
	ice_arr_elem_idx(20, "25GBASE_CR"),
	ice_arr_elem_idx(21, "25GBASE_CR_S"),
	ice_arr_elem_idx(22, "25GBASE_CR1"),
	ice_arr_elem_idx(23, "25GBASE_SR"),
	ice_arr_elem_idx(24, "25GBASE_LR"),
	ice_arr_elem_idx(25, "25GBASE_KR"),
	ice_arr_elem_idx(26, "25GBASE_KR_S"),
	ice_arr_elem_idx(27, "25GBASE_KR1"),
	ice_arr_elem_idx(28, "25G_AUI_AOC_ACC"),
	ice_arr_elem_idx(29, "25G_AUI_C2C"),
	ice_arr_elem_idx(30, "40GBASE_CR4"),
	ice_arr_elem_idx(31, "40GBASE_SR4"),
	ice_arr_elem_idx(32, "40GBASE_LR4"),
	ice_arr_elem_idx(33, "40GBASE_KR4"),
	ice_arr_elem_idx(34, "40G_XLAUI_AOC_ACC"),
	ice_arr_elem_idx(35, "40G_XLAUI"),
	ice_arr_elem_idx(36, "50GBASE_CR2"),
	ice_arr_elem_idx(37, "50GBASE_SR2"),
	ice_arr_elem_idx(38, "50GBASE_LR2"),
	ice_arr_elem_idx(39, "50GBASE_KR2"),
	ice_arr_elem_idx(40, "50G_LAUI2_AOC_ACC"),
	ice_arr_elem_idx(41, "50G_LAUI2"),
	ice_arr_elem_idx(42, "50G_AUI2_AOC_ACC"),
	ice_arr_elem_idx(43, "50G_AUI2"),
	ice_arr_elem_idx(44, "50GBASE_CP"),
	ice_arr_elem_idx(45, "50GBASE_SR"),
	ice_arr_elem_idx(46, "50GBASE_FR"),
	ice_arr_elem_idx(47, "50GBASE_LR"),
	ice_arr_elem_idx(48, "50GBASE_KR_PAM4"),
	ice_arr_elem_idx(49, "50G_AUI1_AOC_ACC"),
	ice_arr_elem_idx(50, "50G_AUI1"),
	ice_arr_elem_idx(51, "100GBASE_CR4"),
	ice_arr_elem_idx(52, "100GBASE_SR4"),
	ice_arr_elem_idx(53, "100GBASE_LR4"),
	ice_arr_elem_idx(54, "100GBASE_KR4"),
	ice_arr_elem_idx(55, "100G_CAUI4_AOC_ACC"),
	ice_arr_elem_idx(56, "100G_CAUI4"),
	ice_arr_elem_idx(57, "100G_AUI4_AOC_ACC"),
	ice_arr_elem_idx(58, "100G_AUI4"),
	ice_arr_elem_idx(59, "100GBASE_CR_PAM4"),
	ice_arr_elem_idx(60, "100GBASE_KR_PAM4"),
	ice_arr_elem_idx(61, "100GBASE_CP2"),
	ice_arr_elem_idx(62, "100GBASE_SR2"),
	ice_arr_elem_idx(63, "100GBASE_DR"),
};

static const char * const ice_link_mode_str_high[] = {
	ice_arr_elem_idx(0, "100GBASE_KR2_PAM4"),
	ice_arr_elem_idx(1, "100G_CAUI2_AOC_ACC"),
	ice_arr_elem_idx(2, "100G_CAUI2"),
	ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
	ice_arr_elem_idx(4, "100G_AUI2"),
};

/**
 * ice_dump_phy_type - helper function to dump phy_type
 * @hw: pointer to the HW structure
 * @low: 64 bit value for phy_type_low
 * @high: 64 bit value for phy_type_high
 * @prefix: prefix string to differentiate multiple dumps
 */
static void
ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
{
	u32 i;

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
		  (unsigned long long)low);

	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_low); i++) {
		if (low & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_low[i]);
	}

	ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
		  (unsigned long long)high);

	for (i = 0; i < ARRAY_SIZE(ice_link_mode_str_high); i++) {
		if (high & BIT_ULL(i))
			ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
				  prefix, i, ice_link_mode_str_high[i]);
	}
}
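
/* Usage sketch (illustrative, not part of the upstream file): each set bit
 * in a phy_type bitmap selects one entry of the string tables above, so a
 * hypothetical phy_type_low of (BIT_ULL(2) | BIT_ULL(12)) dumps
 * "1000BASE_T" for bit 2 and "10GBASE_T" for bit 12:
 *
 *	ice_dump_phy_type(hw, BIT_ULL(2) | BIT_ULL(12), 0, "example");
 */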

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_is_e810t
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810T based, false if not.
 */
bool ice_is_e810t(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_SFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T:
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T3:
		case ICE_SUBDEV_ID_E810T4:
		case ICE_SUBDEV_ID_E810T5:
		case ICE_SUBDEV_ID_E810T7:
			return true;
		}
		break;
	case ICE_DEV_ID_E810C_QSFP:
		switch (hw->subsystem_device_id) {
		case ICE_SUBDEV_ID_E810T2:
		case ICE_SUBDEV_ID_E810T5:
		case ICE_SUBDEV_ID_E810T6:
			return true;
		}
		break;
	default:
		break;
	}

	return false;
}

/**
 * ice_is_e823
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E823-L or E823-C based, false if not.
 */
bool ice_is_e823(struct ice_hw *hw)
{
	switch (hw->device_id) {
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_SFP:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_SGMII:
		return true;
	default:
		return false;
	}
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per-PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer, which should be interpreted as a
 * "manage_mac_read" response. The returned MAC addresses are also stored in
 * the HW struct (port.mac). ice_discover_dev_caps is expected to be called
 * before this function.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_NONDMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_NONDMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}
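
/* Usage sketch (illustrative): callers size the buffer for the two (LAN and
 * WoL) addresses a port may report, as ice_init_hw() later in this file does:
 *
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = ice_calloc(hw, 2,
 *			       sizeof(struct ice_aqc_manage_mac_read_resp));
 *
 *	if (buf) {
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 *		ice_free(hw, buf);
 *	}
 */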

/**
 * ice_phy_maps_to_media
 * @phy_type_low: PHY type low bits
 * @phy_type_high: PHY type high bits
 * @media_mask_low: media type PHY type low bitmask
 * @media_mask_high: media type PHY type high bitmask
 *
 * Return true if PHY type [low|high] bits are only of media type PHY types
 * [low|high] bitmask.
 */
static bool
ice_phy_maps_to_media(u64 phy_type_low, u64 phy_type_high,
		      u64 media_mask_low, u64 media_mask_high)
{
	/* check if a PHY type exists for the media type */
	if (!(phy_type_low & media_mask_low ||
	      phy_type_high & media_mask_high))
		return false;

	/* check that PHY types are only of media type */
	if (!(phy_type_low & ~media_mask_low) &&
	    !(phy_type_high & ~media_mask_high))
		return true;

	return false;
}

/**
 * ice_set_media_type - Sets media type
 * @pi: port information structure
 *
 * Set ice_port_info PHY media type based on PHY type. This should be called
 * from Get PHY caps with media.
 */
static void ice_set_media_type(struct ice_port_info *pi)
{
	enum ice_media_type *media_type;
	u64 phy_type_high, phy_type_low;

	phy_type_high = pi->phy.phy_type_high;
	phy_type_low = pi->phy.phy_type_low;
	media_type = &pi->phy.media_type;

	/* if no media, then media type is NONE */
	if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
		*media_type = ICE_MEDIA_NONE;
	/* else if PHY types are only BASE-T, then media type is BASET */
	else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
				       ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0))
		*media_type = ICE_MEDIA_BASET;
	/* else if any PHY type is BACKPLANE, then media type is BACKPLANE */
	else if (phy_type_low & ICE_MEDIA_BP_PHY_TYPE_LOW_M ||
		 phy_type_high & ICE_MEDIA_BP_PHY_TYPE_HIGH_M)
		*media_type = ICE_MEDIA_BACKPLANE;
	/* else if PHY types are only optical, or optical and C2M, then media
	 * type is FIBER
	 */
	else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
				       ICE_MEDIA_OPT_PHY_TYPE_LOW_M, 0) ||
		 (phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M &&
		  phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M))
		*media_type = ICE_MEDIA_FIBER;
	/* else if PHY types are only DA, or DA and C2C, then media type DA */
	else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
				       ICE_MEDIA_DAC_PHY_TYPE_LOW_M, 0) ||
		 (phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M &&
		  (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
		   phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
		*media_type = ICE_MEDIA_DA;
	/* else if PHY types are only C2M or only C2C, then media is AUI */
	else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
				       ICE_MEDIA_C2M_PHY_TYPE_LOW_M,
				       ICE_MEDIA_C2M_PHY_TYPE_HIGH_M) ||
		 ice_phy_maps_to_media(phy_type_low, phy_type_high,
				       ICE_MEDIA_C2C_PHY_TYPE_LOW_M,
				       ICE_MEDIA_C2C_PHY_TYPE_HIGH_M))
		*media_type = ICE_MEDIA_AUI;

	else
		*media_type = ICE_MEDIA_UNKNOWN;
}
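
/* Illustrative note (not from the upstream file): ice_phy_maps_to_media()
 * returns true only when at least one PHY type bit falls inside the media
 * mask and no bit falls outside it. With a BASE-T mask, for example:
 *
 *	ice_phy_maps_to_media(ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0,
 *			      ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0);  -> true
 *	ice_phy_maps_to_media(ICE_MEDIA_BASET_PHY_TYPE_LOW_M |
 *			      ICE_MEDIA_BP_PHY_TYPE_LOW_M, 0,
 *			      ICE_MEDIA_BASET_PHY_TYPE_LOW_M, 0);  -> false
 */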

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	const char *prefix;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);

	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");

	switch (report_mode) {
	case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
		prefix = "phy_caps_media";
		break;
	case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
		prefix = "phy_caps_no_media";
		break;
	case ICE_AQC_REPORT_ACTIVE_CFG:
		prefix = "phy_caps_active";
		break;
	case ICE_AQC_REPORT_DFLT_CFG:
		prefix = "phy_caps_default";
		break;
	default:
		prefix = "phy_caps_invalid";
	}

	ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
			  LE64_TO_CPU(pcaps->phy_type_high), prefix);

	ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
		  prefix, report_mode);
	ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
		  pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
		  prefix, pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
		  prefix, pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
		  pcaps->module_type[2]);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
		ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
			   sizeof(pi->phy.link_info.module_type),
			   ICE_NONDMA_TO_NONDMA);
		ice_set_media_type(pi);
		ice_debug(hw, ICE_DBG_LINK, "%s: media_type = 0x%x\n", prefix,
			  pi->phy.media_type);
	}

	return status;
}

/**
 * ice_aq_get_netlist_node
 * @hw: pointer to the hw struct
 * @cmd: get_link_topo AQ structure
 * @node_part_number: output node part number if node found
 * @node_handle: output node handle parameter if node found
 */
enum ice_status
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
			u8 *node_part_number, u16 *node_handle)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
	desc.params.get_link_topo = *cmd;

	if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
		return ICE_ERR_NOT_SUPPORTED;

	if (node_handle)
		*node_handle =
			LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
	if (node_part_number)
		*node_part_number = desc.params.get_link_topo.node_part_num;

	return ICE_SUCCESS;
}
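
/* Usage sketch (illustrative): query the active PHY configuration with a
 * caller-owned capabilities buffer, mirroring the pattern ice_init_hw()
 * uses for ICE_AQC_REPORT_TOPO_CAP_MEDIA:
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     pcaps, NULL);
 *	ice_free(hw, pcaps);
 */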

#define MAX_NETLIST_SIZE 10
/**
 * ice_find_netlist_node
 * @hw: pointer to the hw struct
 * @node_type_ctx: type of netlist node to look for
 * @node_part_number: node part number to look for
 * @node_handle: output parameter if node found - optional
 *
 * Find and return the node handle for a given node type and part number in
 * the netlist. Returns ICE_SUCCESS when the node is found and
 * ICE_ERR_DOES_NOT_EXIST otherwise. If node_handle is provided, it is set to
 * the handle of the node found.
 */
enum ice_status
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
		      u16 *node_handle)
{
	struct ice_aqc_get_link_topo cmd;
	u8 rec_node_part_number;
	u16 rec_node_handle;
	u8 idx;

	for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
		enum ice_status status;

		memset(&cmd, 0, sizeof(cmd));

		cmd.addr.topo_params.node_type_ctx =
			(node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
		cmd.addr.topo_params.index = idx;

		status = ice_aq_get_netlist_node(hw, &cmd,
						 &rec_node_part_number,
						 &rec_node_handle);
		if (status)
			return status;

		if (rec_node_part_number == node_part_number) {
			if (node_handle)
				*node_handle = rec_node_handle;
			return ICE_SUCCESS;
		}
	}

	return ICE_ERR_DOES_NOT_EXIST;
}
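
/* Usage sketch (illustrative; EXAMPLE_NODE_TYPE and EXAMPLE_PART_NUM are
 * placeholders, not constants defined by this codebase):
 *
 *	u16 handle;
 *
 *	if (ice_find_netlist_node(hw, EXAMPLE_NODE_TYPE, EXAMPLE_PART_NUM,
 *				  &handle) == ICE_SUCCESS)
 *		ice_debug(hw, ICE_DBG_INIT, "found node, handle %u\n",
 *			  handle);
 */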

#define ice_get_link_status_datalen(hw)	ICE_GET_LINK_STATUS_DATALEN_V1

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x0607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data,
				 ice_get_link_status_datalen(hw), cd);
	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
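
/* Usage sketch (illustrative): refresh the cached link state without
 * enabling link status events, keeping a private copy of the result:
 *
 *	struct ice_link_status link;
 *
 *	if (ice_aq_get_link_info(pi, false, &link, NULL) == ICE_SUCCESS)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "speed = 0x%x\n",
 *			  link.link_speed);
 */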

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @auto_drop: Tell HW to drop packets if TC queue is blocked
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
		cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);
	sw->prof_res_bm_init = 0;

	status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
	if (status) {
		ice_free(hw, hw->switch_info);
		return status;
	}
	return ICE_SUCCESS;
}
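
/* Usage sketch (illustrative): ice_init_hw() later in this file uses this
 * command to enable jumbo frames at the MAC level:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX,
 *				    false, NULL);
 */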

/**
 * ice_cleanup_fltr_mgmt_single - clears a single filter mgmt struct
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function clears filters
 */
static void
ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
{
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	if (!sw)
		return;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = sw->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_sw_replay_rule_info(hw, sw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
		GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_orom_info *orom;
	struct ice_nvm_info *nvm;

	orom = &hw->flash.orom;
	nvm = &hw->flash.nvm;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major, nvm->minor, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_set_umac_shared
 * @hw: pointer to the hw struct
 *
 * Set boolean flag to allow unicast MAC sharing
 */
void ice_set_umac_shared(struct ice_hw *hw)
{
	hw->umac_shared = true;
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;
	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_fwlog_set_support_ena(hw);
	status = ice_fwlog_set(hw, &hw->fwlog_cfg);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
			  status);
	} else {
		if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
			status = ice_fwlog_register(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
					  status);
		} else {
			status = ice_fwlog_unregister(hw);
			if (status)
				ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
					  status);
		}
	}

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	if (!hw->port_info)
		hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;
	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */

	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;

	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
				    NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;

	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);

	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}
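
/* Usage sketch (illustrative): ice_init_hw()/ice_deinit_hw() pair over the
 * lifetime of a PF; the error policy in the caller is hypothetical:
 *
 *	if (ice_init_hw(hw) != ICE_SUCCESS)
 *		return ICE_ERR_CFG;	(hypothetical caller policy)
 *	...driver runtime...
 *	ice_deinit_hw(hw);
 */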

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
					  GLNVM_ULD_PE_DONE_M : 0);

	reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < reset_wait_cnt; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == reset_wait_cnt) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	reset_wait_cnt = ICE_PF_RESET_WAIT_COUNT;
	cfg_lock_timeout = ICE_GLOBAL_CFG_LOCK_TIMEOUT;

	for (cnt = 0; cnt < cfg_lock_timeout + reset_wait_cnt; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == cfg_lock_timeout + reset_wait_cnt) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/**
 * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from HW register space to dense structure
 */
static enum ice_status
ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately from HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		u32 *ctx = (u32 *)(ice_rxq_ctx + (i * sizeof(u32)));

		*ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
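
/* Illustrative note (not from the upstream file): each ICE_CTX_STORE() row
 * records the width and LSB a sparse-structure field occupies in the dense
 * HW image; ice_set_ctx()/ice_get_ctx() walk such tables to pack or unpack
 * every field. For example, the 13-bit 'qlen' field above starts at bit 89,
 * so it occupies bits 89..101, spanning bytes 11 and 12 of the dense Rx
 * queue context image.
 */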

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

/**
 * ice_read_rxq_ctx - Read rxq context from HW
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Read rxq context from HW register space and then converts it from dense
 * structure to sparse
 */
enum ice_status
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		 u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
	enum ice_status status;

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
	if (status)
		return status;

	return ice_get_ctx(ctx_buf, (u8 *)rlan_ctx, ice_rlan_ctx_info);
}

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}
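
/* Usage sketch (illustrative) for ice_write_rxq_ctx() above; the base shift
 * and field values are arbitrary examples, not requirements taken from this
 * file:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_phys_addr >> 7;	(ring base in 128-byte units)
 *	rlan_ctx.qlen = 512;			(descriptors in the ring)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */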

/* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
 * Bit[0-175] is valid
 */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
		    ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_dnl_get_status:
	case ice_aqc_opc_dnl_run:
	case ice_aqc_opc_dnl_call:
	case ice_aqc_opc_dnl_read_sto:
	case ice_aqc_opc_dnl_write_sto:
	case ice_aqc_opc_dnl_set_breakpoints:
	case ice_aqc_opc_dnl_read_log:
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_done_alt_write:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = LE16_TO_CPU(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	ice_memset(&desc_cpy, 0, sizeof(desc_cpy), ICE_NONDMA_MEM);

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = (u8 *)ice_malloc(hw, buf_size);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		ice_memcpy(&desc_cpy, desc, sizeof(desc_cpy),
			   ICE_NONDMA_TO_NONDMA);
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || status == ICE_SUCCESS ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			ice_memcpy(buf, buf_cpy, buf_size,
				   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(desc, &desc_cpy, sizeof(desc_cpy),
			   ICE_NONDMA_TO_NONDMA);

		ice_msec_delay(ICE_SQ_SEND_DELAY_TIME_MS, false);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	if (buf_cpy)
		ice_free(hw, buf_cpy);

	return status;
}
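
/* Illustrative note (not from the upstream file): the desc_cpy/buf_cpy
 * copies above exist because firmware may have modified both the descriptor
 * and the indirect buffer on a failed attempt; restoring them before each
 * resend keeps every retry byte-identical to the first send. Retries happen
 * only for opcodes listed in ice_should_retry_sq_send_cmd() and only while
 * the AQ reports ICE_AQ_RC_EBUSY, for at most ICE_SQ_SEND_MAX_EXECUTE
 * attempts spaced ICE_SQ_SEND_DELAY_TIME_MS apart.
 */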
1765 */ 1766 enum ice_status 1767 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, 1768 u16 buf_size, struct ice_sq_cd *cd) 1769 { 1770 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd); 1771 } 1772 1773 /** 1774 * ice_aq_get_fw_ver 1775 * @hw: pointer to the HW struct 1776 * @cd: pointer to command details structure or NULL 1777 * 1778 * Get the firmware version (0x0001) from the admin queue commands 1779 */ 1780 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd) 1781 { 1782 struct ice_aqc_get_ver *resp; 1783 struct ice_aq_desc desc; 1784 enum ice_status status; 1785 1786 resp = &desc.params.get_ver; 1787 1788 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver); 1789 1790 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1791 1792 if (!status) { 1793 hw->fw_branch = resp->fw_branch; 1794 hw->fw_maj_ver = resp->fw_major; 1795 hw->fw_min_ver = resp->fw_minor; 1796 hw->fw_patch = resp->fw_patch; 1797 hw->fw_build = LE32_TO_CPU(resp->fw_build); 1798 hw->api_branch = resp->api_branch; 1799 hw->api_maj_ver = resp->api_major; 1800 hw->api_min_ver = resp->api_minor; 1801 hw->api_patch = resp->api_patch; 1802 } 1803 1804 return status; 1805 } 1806 1807 /** 1808 * ice_aq_send_driver_ver 1809 * @hw: pointer to the HW struct 1810 * @dv: driver's major, minor version 1811 * @cd: pointer to command details structure or NULL 1812 * 1813 * Send the driver version (0x0002) to the firmware 1814 */ 1815 enum ice_status 1816 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv, 1817 struct ice_sq_cd *cd) 1818 { 1819 struct ice_aqc_driver_ver *cmd; 1820 struct ice_aq_desc desc; 1821 u16 len; 1822 1823 cmd = &desc.params.driver_ver; 1824 1825 if (!dv) 1826 return ICE_ERR_PARAM; 1827 1828 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver); 1829 1830 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 1831 cmd->major_ver = dv->major_ver; 1832 cmd->minor_ver = dv->minor_ver; 1833 cmd->build_ver = dv->build_ver; 1834 cmd->subbuild_ver = dv->subbuild_ver; 1835 1836 len = 0; 1837 while (len < sizeof(dv->driver_string) && 1838 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len]) 1839 len++; 1840 1841 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd); 1842 } 1843 1844 /** 1845 * ice_aq_q_shutdown 1846 * @hw: pointer to the HW struct 1847 * @unloading: is the driver unloading itself 1848 * 1849 * Tell the Firmware that we're shutting down the AdminQ and whether 1850 * or not the driver is unloading as well (0x0003). 1851 */ 1852 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) 1853 { 1854 struct ice_aqc_q_shutdown *cmd; 1855 struct ice_aq_desc desc; 1856 1857 cmd = &desc.params.q_shutdown; 1858 1859 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown); 1860 1861 if (unloading) 1862 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING; 1863 1864 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 1865 } 1866 1867 /** 1868 * ice_aq_req_res 1869 * @hw: pointer to the HW struct 1870 * @res: resource ID 1871 * @access: access type 1872 * @sdp_number: resource number 1873 * @timeout: the maximum time in ms that the driver may hold the resource 1874 * @cd: pointer to command details structure or NULL 1875 * 1876 * Requests common resource using the admin queue commands (0x0008). 
1877 * When attempting to acquire the Global Config Lock, the driver can 1878 * learn of three states: 1879 * 1) ICE_SUCCESS - acquired the lock, and can perform the package download 1880 * 2) ICE_ERR_AQ_ERROR - did not get the lock; the driver should fail to load 1881 * 3) ICE_ERR_AQ_NO_WORK - did not get the lock, but another driver has 1882 * successfully downloaded the package; the driver does 1883 * not have to download the package and can continue 1884 * loading 1885 * 1886 * Note that if the caller is in an acquire lock, perform action, release lock 1887 * phase of operation, it is possible that the FW may detect a timeout and issue 1888 * a CORER. In this case, the driver will receive a CORER interrupt and will 1889 * have to determine its cause. The calling thread that is handling this flow 1890 * will likely get an error propagated back to it indicating the Download 1891 * Package, Update Package or the Release Resource AQ commands timed out. 1892 */ 1893 static enum ice_status 1894 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1895 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout, 1896 struct ice_sq_cd *cd) 1897 { 1898 struct ice_aqc_req_res *cmd_resp; 1899 struct ice_aq_desc desc; 1900 enum ice_status status; 1901 1902 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1903 1904 cmd_resp = &desc.params.res_owner; 1905 1906 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res); 1907 1908 cmd_resp->res_id = CPU_TO_LE16(res); 1909 cmd_resp->access_type = CPU_TO_LE16(access); 1910 cmd_resp->res_number = CPU_TO_LE32(sdp_number); 1911 cmd_resp->timeout = CPU_TO_LE32(*timeout); 1912 *timeout = 0; 1913 1914 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1915 1916 /* The completion specifies the maximum time in ms that the driver 1917 * may hold the resource in the Timeout field. 1918 */ 1919 1920 /* Global config lock response utilizes an additional status field. 1921 * 1922 * If the Global config lock resource is held by some other driver, the 1923 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field 1924 * and the timeout field indicates the maximum time the current owner 1925 * of the resource has to free it. 1926 */ 1927 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { 1928 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { 1929 *timeout = LE32_TO_CPU(cmd_resp->timeout); 1930 return ICE_SUCCESS; 1931 } else if (LE16_TO_CPU(cmd_resp->status) == 1932 ICE_AQ_RES_GLBL_IN_PROG) { 1933 *timeout = LE32_TO_CPU(cmd_resp->timeout); 1934 return ICE_ERR_AQ_ERROR; 1935 } else if (LE16_TO_CPU(cmd_resp->status) == 1936 ICE_AQ_RES_GLBL_DONE) { 1937 return ICE_ERR_AQ_NO_WORK; 1938 } 1939 1940 /* invalid FW response, force a timeout immediately */ 1941 *timeout = 0; 1942 return ICE_ERR_AQ_ERROR; 1943 } 1944 1945 /* If the resource is held by some other driver, the command completes 1946 * with a busy return value and the timeout field indicates the maximum 1947 * time the current owner of the resource has to free it.
1948 */ 1949 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) 1950 *timeout = LE32_TO_CPU(cmd_resp->timeout); 1951 1952 return status; 1953 } 1954 1955 /** 1956 * ice_aq_release_res 1957 * @hw: pointer to the HW struct 1958 * @res: resource ID 1959 * @sdp_number: resource number 1960 * @cd: pointer to command details structure or NULL 1961 * 1962 * Release a common resource using the admin queue commands (0x0009) 1963 */ 1964 static enum ice_status 1965 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, 1966 struct ice_sq_cd *cd) 1967 { 1968 struct ice_aqc_req_res *cmd; 1969 struct ice_aq_desc desc; 1970 1971 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 1972 1973 cmd = &desc.params.res_owner; 1974 1975 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res); 1976 1977 cmd->res_id = CPU_TO_LE16(res); 1978 cmd->res_number = CPU_TO_LE32(sdp_number); 1979 1980 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 1981 } 1982 1983 /** 1984 * ice_acquire_res 1985 * @hw: pointer to the HW structure 1986 * @res: resource ID 1987 * @access: access type (read or write) 1988 * @timeout: timeout in milliseconds 1989 * 1990 * This function will attempt to acquire the ownership of a resource. 1991 */ 1992 enum ice_status 1993 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, 1994 enum ice_aq_res_access_type access, u32 timeout) 1995 { 1996 #define ICE_RES_POLLING_DELAY_MS 10 1997 u32 delay = ICE_RES_POLLING_DELAY_MS; 1998 u32 time_left = timeout; 1999 enum ice_status status; 2000 2001 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 2002 2003 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2004 2005 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has 2006 * previously acquired the resource and performed any necessary updates; 2007 * in this case the caller does not obtain the resource and has no 2008 * further work to do. 2009 */ 2010 if (status == ICE_ERR_AQ_NO_WORK) 2011 goto ice_acquire_res_exit; 2012 2013 if (status) 2014 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access); 2015 2016 /* If necessary, poll until the current lock owner times out */ 2017 timeout = time_left; 2018 while (status && timeout && time_left) { 2019 ice_msec_delay(delay, true); 2020 timeout = (timeout > delay) ? timeout - delay : 0; 2021 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); 2022 2023 if (status == ICE_ERR_AQ_NO_WORK) 2024 /* lock free, but no work to do */ 2025 break; 2026 2027 if (!status) 2028 /* lock acquired */ 2029 break; 2030 } 2031 if (status && status != ICE_ERR_AQ_NO_WORK) 2032 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n"); 2033 2034 ice_acquire_res_exit: 2035 if (status == ICE_ERR_AQ_NO_WORK) { 2036 if (access == ICE_RES_WRITE) 2037 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n"); 2038 else 2039 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n"); 2040 } 2041 return status; 2042 } 2043 2044 /** 2045 * ice_release_res 2046 * @hw: pointer to the HW structure 2047 * @res: resource ID 2048 * 2049 * This function will release a resource using the proper Admin Command.
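 *
 * Example (illustrative sketch only): callers normally bracket the
 * protected operation with ice_acquire_res()/ice_release_res(); the
 * resource ID and the 3000 ms timeout below are placeholder values:
 *
 *	status = ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000);
 *	if (!status) {
 *		... access the shared resource here ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}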
2050 */ 2051 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) 2052 { 2053 enum ice_status status; 2054 u32 total_delay = 0; 2055 2056 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 2057 2058 status = ice_aq_release_res(hw, res, 0, NULL); 2059 2060 /* there are some rare cases when trying to release the resource 2061 * results in an admin queue timeout, so handle them correctly 2062 */ 2063 while ((status == ICE_ERR_AQ_TIMEOUT) && 2064 (total_delay < hw->adminq.sq_cmd_timeout)) { 2065 ice_msec_delay(1, true); 2066 status = ice_aq_release_res(hw, res, 0, NULL); 2067 total_delay++; 2068 } 2069 } 2070 2071 /** 2072 * ice_aq_alloc_free_res - command to allocate/free resources 2073 * @hw: pointer to the HW struct 2074 * @num_entries: number of resource entries in buffer 2075 * @buf: Indirect buffer to hold data parameters and response 2076 * @buf_size: size of buffer for indirect commands 2077 * @opc: pass in the command opcode 2078 * @cd: pointer to command details structure or NULL 2079 * 2080 * Helper function to allocate/free resources using the admin queue commands 2081 */ 2082 enum ice_status 2083 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, 2084 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size, 2085 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2086 { 2087 struct ice_aqc_alloc_free_res_cmd *cmd; 2088 struct ice_aq_desc desc; 2089 2090 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 2091 2092 cmd = &desc.params.sw_res_ctrl; 2093 2094 if (!buf) 2095 return ICE_ERR_PARAM; 2096 2097 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries)) 2098 return ICE_ERR_PARAM; 2099 2100 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2101 2102 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 2103 2104 cmd->num_entries = CPU_TO_LE16(num_entries); 2105 2106 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2107 } 2108 2109 /** 2110 * ice_alloc_hw_res - allocate resource 2111 * @hw: pointer to the HW struct 2112 * @type: type of resource 2113 * @num: number of resources to allocate 2114 * @btm: allocate from bottom 2115 * @res: pointer to array that will receive the resources 2116 */ 2117 enum ice_status 2118 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) 2119 { 2120 struct ice_aqc_alloc_free_res_elem *buf; 2121 enum ice_status status; 2122 u16 buf_len; 2123 2124 buf_len = ice_struct_size(buf, elem, num); 2125 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); 2126 if (!buf) 2127 return ICE_ERR_NO_MEMORY; 2128 2129 /* Prepare buffer to allocate resource. 
*/ 2130 buf->num_elems = CPU_TO_LE16(num); 2131 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 2132 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 2133 if (btm) 2134 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 2135 2136 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 2137 ice_aqc_opc_alloc_res, NULL); 2138 if (status) 2139 goto ice_alloc_res_exit; 2140 2141 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num, 2142 ICE_NONDMA_TO_NONDMA); 2143 2144 ice_alloc_res_exit: 2145 ice_free(hw, buf); 2146 return status; 2147 } 2148 2149 /** 2150 * ice_free_hw_res - free allocated HW resource 2151 * @hw: pointer to the HW struct 2152 * @type: type of resource to free 2153 * @num: number of resources 2154 * @res: pointer to array that contains the resources to free 2155 */ 2156 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 2157 { 2158 struct ice_aqc_alloc_free_res_elem *buf; 2159 enum ice_status status; 2160 u16 buf_len; 2161 2162 buf_len = ice_struct_size(buf, elem, num); 2163 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len); 2164 if (!buf) 2165 return ICE_ERR_NO_MEMORY; 2166 2167 /* Prepare buffer to free resource. */ 2168 buf->num_elems = CPU_TO_LE16(num); 2169 buf->res_type = CPU_TO_LE16(type); 2170 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num, 2171 ICE_NONDMA_TO_NONDMA); 2172 2173 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 2174 ice_aqc_opc_free_res, NULL); 2175 if (status) 2176 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 2177 2178 ice_free(hw, buf); 2179 return status; 2180 } 2181 2182 /** 2183 * ice_get_num_per_func - determine number of resources per PF 2184 * @hw: pointer to the HW structure 2185 * @max: value to be evenly split between each PF 2186 * 2187 * Determine the number of valid functions by going through the bitmap returned 2188 * from parsing capabilities and use this to calculate the number of resources 2189 * per PF based on the max value passed in. 
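 *
 * For example (illustrative numbers): a valid_functions bitmap of 0x0F
 * yields ice_hweight8() == 4 PFs, so a device-wide maximum of 768 VSIs
 * would be split as 768 / 4 = 192 per PF.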
2190 */ 2191 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 2192 { 2193 u8 funcs; 2194 2195 #define ICE_CAPS_VALID_FUNCS_M 0xFF 2196 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions & 2197 ICE_CAPS_VALID_FUNCS_M); 2198 2199 if (!funcs) 2200 return 0; 2201 2202 return max / funcs; 2203 } 2204 2205 /** 2206 * ice_print_led_caps - print LED capabilities 2207 * @hw: pointer to the ice_hw instance 2208 * @caps: pointer to common caps instance 2209 * @prefix: string to prefix when printing 2210 * @dbg: set to indicate debug print 2211 */ 2212 static void 2213 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2214 char const *prefix, bool dbg) 2215 { 2216 u8 i; 2217 2218 if (dbg) 2219 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix, 2220 caps->led_pin_num); 2221 else 2222 ice_info(hw, "%s: led_pin_num = %d\n", prefix, 2223 caps->led_pin_num); 2224 2225 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) { 2226 if (!caps->led[i]) 2227 continue; 2228 2229 if (dbg) 2230 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n", 2231 prefix, i, caps->led[i]); 2232 else 2233 ice_info(hw, "%s: led[%d] = %d\n", prefix, i, 2234 caps->led[i]); 2235 } 2236 } 2237 2238 /** 2239 * ice_print_sdp_caps - print SDP capabilities 2240 * @hw: pointer to the ice_hw instance 2241 * @caps: pointer to common caps instance 2242 * @prefix: string to prefix when printing 2243 * @dbg: set to indicate debug print 2244 */ 2245 static void 2246 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2247 char const *prefix, bool dbg) 2248 { 2249 u8 i; 2250 2251 if (dbg) 2252 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix, 2253 caps->sdp_pin_num); 2254 else 2255 ice_info(hw, "%s: sdp_pin_num = %d\n", prefix, 2256 caps->sdp_pin_num); 2257 2258 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) { 2259 if (!caps->sdp[i]) 2260 continue; 2261 2262 if (dbg) 2263 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n", 2264 prefix, i, caps->sdp[i]); 2265 else 2266 ice_info(hw, "%s: sdp[%d] = %d\n", prefix, 2267 i, caps->sdp[i]); 2268 } 2269 } 2270 2271 /** 2272 * ice_parse_common_caps - parse common device/function capabilities 2273 * @hw: pointer to the HW struct 2274 * @caps: pointer to common capabilities structure 2275 * @elem: the capability element to parse 2276 * @prefix: message prefix for tracing capabilities 2277 * 2278 * Given a capability element, extract relevant details into the common 2279 * capability structure. 2280 * 2281 * Returns: true if the capability matches one of the common capability ids, 2282 * false otherwise. 
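 *
 * For example (illustrative values): an element with cap ==
 * ICE_AQC_CAPS_RSS, number == 512 and logical_id == 9 is recorded as a
 * 512-entry RSS table with 9-bit wide entries, and true is returned; an
 * element with an unrecognized cap ID leaves @caps untouched and false
 * is returned.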
2283 */ 2284 static bool 2285 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 2286 struct ice_aqc_list_caps_elem *elem, const char *prefix) 2287 { 2288 u32 logical_id = LE32_TO_CPU(elem->logical_id); 2289 u32 phys_id = LE32_TO_CPU(elem->phys_id); 2290 u32 number = LE32_TO_CPU(elem->number); 2291 u16 cap = LE16_TO_CPU(elem->cap); 2292 bool found = true; 2293 2294 switch (cap) { 2295 case ICE_AQC_CAPS_SWITCHING_MODE: 2296 caps->switching_mode = number; 2297 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix, 2298 caps->switching_mode); 2299 break; 2300 case ICE_AQC_CAPS_MANAGEABILITY_MODE: 2301 caps->mgmt_mode = number; 2302 caps->mgmt_protocols_mctp = logical_id; 2303 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix, 2304 caps->mgmt_mode); 2305 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix, 2306 caps->mgmt_protocols_mctp); 2307 break; 2308 case ICE_AQC_CAPS_OS2BMC: 2309 caps->os2bmc = number; 2310 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc); 2311 break; 2312 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2313 caps->valid_functions = number; 2314 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 2315 caps->valid_functions); 2316 break; 2317 case ICE_AQC_CAPS_SRIOV: 2318 caps->sr_iov_1_1 = (number == 1); 2319 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 2320 caps->sr_iov_1_1); 2321 break; 2322 case ICE_AQC_CAPS_802_1QBG: 2323 caps->evb_802_1_qbg = (number == 1); 2324 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number); 2325 break; 2326 case ICE_AQC_CAPS_802_1BR: 2327 caps->evb_802_1_qbh = (number == 1); 2328 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number); 2329 break; 2330 case ICE_AQC_CAPS_DCB: 2331 caps->dcb = (number == 1); 2332 caps->active_tc_bitmap = logical_id; 2333 caps->maxtc = phys_id; 2334 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 2335 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 2336 caps->active_tc_bitmap); 2337 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 2338 break; 2339 case ICE_AQC_CAPS_ISCSI: 2340 caps->iscsi = (number == 1); 2341 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi); 2342 break; 2343 case ICE_AQC_CAPS_RSS: 2344 caps->rss_table_size = number; 2345 caps->rss_table_entry_width = logical_id; 2346 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 2347 caps->rss_table_size); 2348 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 2349 caps->rss_table_entry_width); 2350 break; 2351 case ICE_AQC_CAPS_RXQS: 2352 caps->num_rxq = number; 2353 caps->rxq_first_id = phys_id; 2354 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 2355 caps->num_rxq); 2356 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 2357 caps->rxq_first_id); 2358 break; 2359 case ICE_AQC_CAPS_TXQS: 2360 caps->num_txq = number; 2361 caps->txq_first_id = phys_id; 2362 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 2363 caps->num_txq); 2364 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 2365 caps->txq_first_id); 2366 break; 2367 case ICE_AQC_CAPS_MSIX: 2368 caps->num_msix_vectors = number; 2369 caps->msix_vector_first_id = phys_id; 2370 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 2371 caps->num_msix_vectors); 2372 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 2373 caps->msix_vector_first_id); 2374 break; 2375 case 
ICE_AQC_CAPS_NVM_MGMT: 2376 caps->sec_rev_disabled = 2377 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ? 2378 true : false; 2379 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix, 2380 caps->sec_rev_disabled); 2381 caps->update_disabled = 2382 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ? 2383 true : false; 2384 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix, 2385 caps->update_disabled); 2386 caps->nvm_unified_update = 2387 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 2388 true : false; 2389 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 2390 caps->nvm_unified_update); 2391 caps->netlist_auth = 2392 (number & ICE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ? 2393 true : false; 2394 ice_debug(hw, ICE_DBG_INIT, "%s: netlist_auth = %d\n", prefix, 2395 caps->netlist_auth); 2396 break; 2397 case ICE_AQC_CAPS_CEM: 2398 caps->mgmt_cem = (number == 1); 2399 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix, 2400 caps->mgmt_cem); 2401 break; 2402 case ICE_AQC_CAPS_IWARP: 2403 caps->iwarp = (number == 1); 2404 ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp); 2405 break; 2406 case ICE_AQC_CAPS_ROCEV2_LAG: 2407 caps->roce_lag = (number == 1); 2408 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n", 2409 prefix, caps->roce_lag); 2410 break; 2411 case ICE_AQC_CAPS_LED: 2412 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) { 2413 caps->led[phys_id] = true; 2414 caps->led_pin_num++; 2415 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id); 2416 } 2417 break; 2418 case ICE_AQC_CAPS_SDP: 2419 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) { 2420 caps->sdp[phys_id] = true; 2421 caps->sdp_pin_num++; 2422 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id); 2423 } 2424 break; 2425 case ICE_AQC_CAPS_WR_CSR_PROT: 2426 caps->wr_csr_prot = number; 2427 caps->wr_csr_prot |= (u64)logical_id << 32; 2428 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix, 2429 (unsigned long long)caps->wr_csr_prot); 2430 break; 2431 case ICE_AQC_CAPS_WOL_PROXY: 2432 caps->num_wol_proxy_fltr = number; 2433 caps->wol_proxy_vsi_seid = logical_id; 2434 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M); 2435 caps->acpi_prog_mthd = !!(phys_id & 2436 ICE_ACPI_PROG_MTHD_M); 2437 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M); 2438 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix, 2439 caps->num_wol_proxy_fltr); 2440 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix, 2441 caps->wol_proxy_vsi_seid); 2442 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n", 2443 prefix, caps->apm_wol_support); 2444 break; 2445 case ICE_AQC_CAPS_MAX_MTU: 2446 caps->max_mtu = number; 2447 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 2448 prefix, caps->max_mtu); 2449 break; 2450 case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE: 2451 caps->pcie_reset_avoidance = (number > 0); 2452 ice_debug(hw, ICE_DBG_INIT, 2453 "%s: pcie_reset_avoidance = %d\n", prefix, 2454 caps->pcie_reset_avoidance); 2455 break; 2456 case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT: 2457 caps->reset_restrict_support = (number == 1); 2458 ice_debug(hw, ICE_DBG_INIT, 2459 "%s: reset_restrict_support = %d\n", prefix, 2460 caps->reset_restrict_support); 2461 break; 2462 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0: 2463 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG1: 2464 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG2: 2465 case ICE_AQC_CAPS_EXT_TOPO_DEV_IMG3: 2466 { 2467 u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0); 2468 2469 caps->ext_topo_dev_img_ver_high[index] = number; 2470 
caps->ext_topo_dev_img_ver_low[index] = logical_id; 2471 caps->ext_topo_dev_img_part_num[index] = 2472 (phys_id & ICE_EXT_TOPO_DEV_IMG_PART_NUM_M) >> 2473 ICE_EXT_TOPO_DEV_IMG_PART_NUM_S; 2474 caps->ext_topo_dev_img_load_en[index] = 2475 (phys_id & ICE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0; 2476 caps->ext_topo_dev_img_prog_en[index] = 2477 (phys_id & ICE_EXT_TOPO_DEV_IMG_PROG_EN) != 0; 2478 caps->ext_topo_dev_img_ver_schema[index] = 2479 (phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0; 2480 ice_debug(hw, ICE_DBG_INIT, 2481 "%s: ext_topo_dev_img_ver_high[%d] = %d\n", 2482 prefix, index, 2483 caps->ext_topo_dev_img_ver_high[index]); 2484 ice_debug(hw, ICE_DBG_INIT, 2485 "%s: ext_topo_dev_img_ver_low[%d] = %d\n", 2486 prefix, index, 2487 caps->ext_topo_dev_img_ver_low[index]); 2488 ice_debug(hw, ICE_DBG_INIT, 2489 "%s: ext_topo_dev_img_part_num[%d] = %d\n", 2490 prefix, index, 2491 caps->ext_topo_dev_img_part_num[index]); 2492 ice_debug(hw, ICE_DBG_INIT, 2493 "%s: ext_topo_dev_img_load_en[%d] = %d\n", 2494 prefix, index, 2495 caps->ext_topo_dev_img_load_en[index]); 2496 ice_debug(hw, ICE_DBG_INIT, 2497 "%s: ext_topo_dev_img_prog_en[%d] = %d\n", 2498 prefix, index, 2499 caps->ext_topo_dev_img_prog_en[index]); 2500 ice_debug(hw, ICE_DBG_INIT, 2501 "%s: ext_topo_dev_img_ver_schema[%d] = %d\n", 2502 prefix, index, 2503 caps->ext_topo_dev_img_ver_schema[index]); 2504 break; 2505 } 2506 case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: 2507 caps->tx_sched_topo_comp_mode_en = (number == 1); 2508 break; 2509 case ICE_AQC_CAPS_DYN_FLATTENING: 2510 caps->dyn_flattening_en = (number == 1); 2511 ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n", 2512 prefix, caps->dyn_flattening_en); 2513 break; 2514 case ICE_AQC_CAPS_OROM_RECOVERY_UPDATE: 2515 caps->orom_recovery_update = (number == 1); 2516 ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n", 2517 prefix, caps->orom_recovery_update); 2518 break; 2519 default: 2520 /* Not one of the recognized common capabilities */ 2521 found = false; 2522 } 2523 2524 return found; 2525 } 2526 2527 /** 2528 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 2529 * @hw: pointer to the HW structure 2530 * @caps: pointer to capabilities structure to fix 2531 * 2532 * Re-calculate the capabilities that are dependent on the number of physical 2533 * ports; i.e. some features are not supported or function differently on 2534 * devices with more than 4 ports. 2535 */ 2536 static void 2537 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 2538 { 2539 /* This assumes device capabilities are always scanned before function 2540 * capabilities during the initialization flow. 2541 */ 2542 if (hw->dev_caps.num_funcs > 4) { 2543 /* Max 4 TCs per port */ 2544 caps->maxtc = 4; 2545 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 2546 caps->maxtc); 2547 if (caps->iwarp) { 2548 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 2549 caps->iwarp = 0; 2550 } 2551 2552 /* print message only when processing device capabilities 2553 * during initialization. 2554 */ 2555 if (caps == &hw->dev_caps.common_cap) 2556 ice_info(hw, "RDMA functionality is not available with the current device configuration.\n"); 2557 } 2558 } 2559 2560 /** 2561 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 2562 * @hw: pointer to the HW struct 2563 * @func_p: pointer to function capabilities structure 2564 * @cap: pointer to the capability element to parse 2565 * 2566 * Extract function capabilities for ICE_AQC_CAPS_VF. 
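 *
 * For example (illustrative values): number == 16 and logical_id == 64
 * record 16 allocated VFs for this function with a VF base ID of 64.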
2567 */ 2568 static void 2569 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2570 struct ice_aqc_list_caps_elem *cap) 2571 { 2572 u32 number = LE32_TO_CPU(cap->number); 2573 u32 logical_id = LE32_TO_CPU(cap->logical_id); 2574 2575 func_p->num_allocd_vfs = number; 2576 func_p->vf_base_id = logical_id; 2577 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2578 func_p->num_allocd_vfs); 2579 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2580 func_p->vf_base_id); 2581 } 2582 2583 /** 2584 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2585 * @hw: pointer to the HW struct 2586 * @func_p: pointer to function capabilities structure 2587 * @cap: pointer to the capability element to parse 2588 * 2589 * Extract function capabilities for ICE_AQC_CAPS_VSI. 2590 */ 2591 static void 2592 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2593 struct ice_aqc_list_caps_elem *cap) 2594 { 2595 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2596 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2597 LE32_TO_CPU(cap->number)); 2598 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2599 func_p->guar_num_vsi); 2600 } 2601 2602 /** 2603 * ice_parse_func_caps - Parse function capabilities 2604 * @hw: pointer to the HW struct 2605 * @func_p: pointer to function capabilities structure 2606 * @buf: buffer containing the function capability records 2607 * @cap_count: the number of capabilities 2608 * 2609 * Helper function to parse function (0x000A) capabilities list. For 2610 * capabilities shared between device and function, this relies on 2611 * ice_parse_common_caps. 2612 * 2613 * Loop through the list of provided capabilities and extract the relevant 2614 * data into the function capabilities structure. 2615 */ 2616 static void 2617 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2618 void *buf, u32 cap_count) 2619 { 2620 struct ice_aqc_list_caps_elem *cap_resp; 2621 u32 i; 2622 2623 cap_resp = (struct ice_aqc_list_caps_elem *)buf; 2624 2625 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM); 2626 2627 for (i = 0; i < cap_count; i++) { 2628 u16 cap = LE16_TO_CPU(cap_resp[i].cap); 2629 bool found; 2630 2631 found = ice_parse_common_caps(hw, &func_p->common_cap, 2632 &cap_resp[i], "func caps"); 2633 2634 switch (cap) { 2635 case ICE_AQC_CAPS_VF: 2636 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2637 break; 2638 case ICE_AQC_CAPS_VSI: 2639 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2640 break; 2641 default: 2642 /* Don't list common capabilities as unknown */ 2643 if (!found) 2644 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2645 i, cap); 2646 break; 2647 } 2648 } 2649 2650 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true); 2651 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true); 2652 2653 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2654 } 2655 2656 /** 2657 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2658 * @hw: pointer to the HW struct 2659 * @dev_p: pointer to device capabilities structure 2660 * @cap: capability element to parse 2661 * 2662 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
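 *
 * For example (illustrative value): a number of 0x5 is a bitmap with
 * bits 0 and 2 set, so ice_hweight32() reports num_funcs == 2.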
2663 */ 2664 static void 2665 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2666 struct ice_aqc_list_caps_elem *cap) 2667 { 2668 u32 number = LE32_TO_CPU(cap->number); 2669 2670 dev_p->num_funcs = ice_hweight32(number); 2671 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2672 dev_p->num_funcs); 2673 2674 } 2675 2676 /** 2677 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2678 * @hw: pointer to the HW struct 2679 * @dev_p: pointer to device capabilities structure 2680 * @cap: capability element to parse 2681 * 2682 * Parse ICE_AQC_CAPS_VF for device capabilities. 2683 */ 2684 static void 2685 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2686 struct ice_aqc_list_caps_elem *cap) 2687 { 2688 u32 number = LE32_TO_CPU(cap->number); 2689 2690 dev_p->num_vfs_exposed = number; 2691 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", 2692 dev_p->num_vfs_exposed); 2693 } 2694 2695 /** 2696 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2697 * @hw: pointer to the HW struct 2698 * @dev_p: pointer to device capabilities structure 2699 * @cap: capability element to parse 2700 * 2701 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2702 */ 2703 static void 2704 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2705 struct ice_aqc_list_caps_elem *cap) 2706 { 2707 u32 number = LE32_TO_CPU(cap->number); 2708 2709 dev_p->num_vsi_allocd_to_host = number; 2710 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2711 dev_p->num_vsi_allocd_to_host); 2712 } 2713 2714 /** 2715 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap 2716 * @hw: pointer to the HW struct 2717 * @dev_p: pointer to device capabilities structure 2718 * @cap: capability element to parse 2719 * 2720 * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. 2721 */ 2722 static void 2723 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2724 struct ice_aqc_list_caps_elem *cap) 2725 { 2726 dev_p->nac_topo.mode = LE32_TO_CPU(cap->number); 2727 dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M; 2728 2729 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", 2730 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); 2731 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", 2732 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); 2733 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", 2734 dev_p->nac_topo.id); 2735 } 2736 2737 /** 2738 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap 2739 * @hw: pointer to the HW struct 2740 * @dev_p: pointer to device capabilities structure 2741 * @cap: capability element to parse 2742 * 2743 * Parse ICE_AQC_CAPS_SENSOR_READING for device capability for reading 2744 * enabled sensors. 
2745 */ 2746 static void 2747 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2748 struct ice_aqc_list_caps_elem *cap) 2749 { 2750 dev_p->supported_sensors = LE32_TO_CPU(cap->number); 2751 2752 ice_debug(hw, ICE_DBG_INIT, 2753 "dev caps: supported sensors (bitmap) = 0x%x\n", 2754 dev_p->supported_sensors); 2755 } 2756 2757 /** 2758 * ice_parse_dev_caps - Parse device capabilities 2759 * @hw: pointer to the HW struct 2760 * @dev_p: pointer to device capabilities structure 2761 * @buf: buffer containing the device capability records 2762 * @cap_count: the number of capabilities 2763 * 2764 * Helper function to parse device (0x000B) capabilities list. For 2765 * capabilities shared between device and function, this relies on 2766 * ice_parse_common_caps. 2767 * 2768 * Loop through the list of provided capabilities and extract the relevant 2769 * data into the device capabilities structure. 2770 */ 2771 static void 2772 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2773 void *buf, u32 cap_count) 2774 { 2775 struct ice_aqc_list_caps_elem *cap_resp; 2776 u32 i; 2777 2778 cap_resp = (struct ice_aqc_list_caps_elem *)buf; 2779 2780 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM); 2781 2782 for (i = 0; i < cap_count; i++) { 2783 u16 cap = LE16_TO_CPU(cap_resp[i].cap); 2784 bool found; 2785 2786 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2787 &cap_resp[i], "dev caps"); 2788 2789 switch (cap) { 2790 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2791 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2792 break; 2793 case ICE_AQC_CAPS_VF: 2794 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2795 break; 2796 case ICE_AQC_CAPS_VSI: 2797 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2798 break; 2799 case ICE_AQC_CAPS_NAC_TOPOLOGY: 2800 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); 2801 break; 2802 case ICE_AQC_CAPS_SENSOR_READING: 2803 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); 2804 break; 2805 default: 2806 /* Don't list common capabilities as unknown */ 2807 if (!found) 2808 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2809 i, cap); 2810 break; 2811 } 2812 } 2813 2814 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true); 2815 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true); 2816 2817 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2818 } 2819 2820 /** 2821 * ice_aq_list_caps - query function/device capabilities 2822 * @hw: pointer to the HW struct 2823 * @buf: a buffer to hold the capabilities 2824 * @buf_size: size of the buffer 2825 * @cap_count: if not NULL, set to the number of capabilities reported 2826 * @opc: capabilities type to discover, device or function 2827 * @cd: pointer to command details structure or NULL 2828 * 2829 * Get the function (0x000A) or device (0x000B) capabilities description from 2830 * firmware and store it in the buffer. 2831 * 2832 * If the cap_count pointer is not NULL, then it is set to the number of 2833 * capabilities firmware will report. Note that if the buffer size is too 2834 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2835 * cap_count will still be updated in this case. It is recommended that the 2836 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2837 * firmware could return) to avoid this.
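 *
 * Example (illustrative sketch only): querying device capabilities with
 * a maximum-sized buffer, as recommended above:
 *
 *	u32 cap_count = 0;
 *	void *cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
 *
 *	if (cbuf) {
 *		status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN,
 *					  &cap_count,
 *					  ice_aqc_opc_list_dev_caps, NULL);
 *		...
 *		ice_free(hw, cbuf);
 *	}
 *
 * This is the same pattern ice_discover_dev_caps() below uses.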
2838 */ 2839 static enum ice_status 2840 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2841 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2842 { 2843 struct ice_aqc_list_caps *cmd; 2844 struct ice_aq_desc desc; 2845 enum ice_status status; 2846 2847 cmd = &desc.params.get_cap; 2848 2849 if (opc != ice_aqc_opc_list_func_caps && 2850 opc != ice_aqc_opc_list_dev_caps) 2851 return ICE_ERR_PARAM; 2852 2853 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2854 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2855 2856 if (cap_count) 2857 *cap_count = LE32_TO_CPU(cmd->count); 2858 2859 return status; 2860 } 2861 2862 /** 2863 * ice_discover_dev_caps - Read and extract device capabilities 2864 * @hw: pointer to the hardware structure 2865 * @dev_caps: pointer to device capabilities structure 2866 * 2867 * Read the device capabilities and extract them into the dev_caps structure 2868 * for later use. 2869 */ 2870 static enum ice_status 2871 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2872 { 2873 enum ice_status status; 2874 u32 cap_count = 0; 2875 void *cbuf; 2876 2877 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); 2878 if (!cbuf) 2879 return ICE_ERR_NO_MEMORY; 2880 2881 /* Although the driver doesn't know the number of capabilities the 2882 * device will return, we can simply send a 4KB buffer, the maximum 2883 * possible size that firmware can return. 2884 */ 2885 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2886 2887 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2888 ice_aqc_opc_list_dev_caps, NULL); 2889 if (!status) 2890 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2891 ice_free(hw, cbuf); 2892 2893 return status; 2894 } 2895 2896 /** 2897 * ice_discover_func_caps - Read and extract function capabilities 2898 * @hw: pointer to the hardware structure 2899 * @func_caps: pointer to function capabilities structure 2900 * 2901 * Read the function capabilities and extract them into the func_caps structure 2902 * for later use. 2903 */ 2904 static enum ice_status 2905 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2906 { 2907 enum ice_status status; 2908 u32 cap_count = 0; 2909 void *cbuf; 2910 2911 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN); 2912 if (!cbuf) 2913 return ICE_ERR_NO_MEMORY; 2914 2915 /* Although the driver doesn't know the number of capabilities the 2916 * device will return, we can simply send a 4KB buffer, the maximum 2917 * possible size that firmware can return. 
2918 */ 2919 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2920 2921 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2922 ice_aqc_opc_list_func_caps, NULL); 2923 if (!status) 2924 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2925 ice_free(hw, cbuf); 2926 2927 return status; 2928 } 2929 2930 /** 2931 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2932 * @hw: pointer to the hardware structure 2933 */ 2934 void ice_set_safe_mode_caps(struct ice_hw *hw) 2935 { 2936 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2937 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2938 struct ice_hw_common_caps cached_caps; 2939 u32 num_funcs; 2940 2941 /* cache some func_caps values that should be restored after memset */ 2942 cached_caps = func_caps->common_cap; 2943 2944 /* unset func capabilities */ 2945 memset(func_caps, 0, sizeof(*func_caps)); 2946 2947 #define ICE_RESTORE_FUNC_CAP(name) \ 2948 func_caps->common_cap.name = cached_caps.name 2949 2950 /* restore cached values */ 2951 ICE_RESTORE_FUNC_CAP(valid_functions); 2952 ICE_RESTORE_FUNC_CAP(txq_first_id); 2953 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2954 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2955 ICE_RESTORE_FUNC_CAP(max_mtu); 2956 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2957 2958 /* one Tx and one Rx queue in safe mode */ 2959 func_caps->common_cap.num_rxq = 1; 2960 func_caps->common_cap.num_txq = 1; 2961 2962 /* two MSIX vectors, one for traffic and one for misc causes */ 2963 func_caps->common_cap.num_msix_vectors = 2; 2964 func_caps->guar_num_vsi = 1; 2965 2966 /* cache some dev_caps values that should be restored after memset */ 2967 cached_caps = dev_caps->common_cap; 2968 num_funcs = dev_caps->num_funcs; 2969 2970 /* unset dev capabilities */ 2971 memset(dev_caps, 0, sizeof(*dev_caps)); 2972 2973 #define ICE_RESTORE_DEV_CAP(name) \ 2974 dev_caps->common_cap.name = cached_caps.name 2975 2976 /* restore cached values */ 2977 ICE_RESTORE_DEV_CAP(valid_functions); 2978 ICE_RESTORE_DEV_CAP(txq_first_id); 2979 ICE_RESTORE_DEV_CAP(rxq_first_id); 2980 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2981 ICE_RESTORE_DEV_CAP(max_mtu); 2982 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2983 dev_caps->num_funcs = num_funcs; 2984 2985 /* one Tx and one Rx queue per function in safe mode */ 2986 dev_caps->common_cap.num_rxq = num_funcs; 2987 dev_caps->common_cap.num_txq = num_funcs; 2988 2989 /* two MSIX vectors per function */ 2990 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2991 } 2992 2993 /** 2994 * ice_get_caps - get info about the HW 2995 * @hw: pointer to the hardware structure 2996 */ 2997 enum ice_status ice_get_caps(struct ice_hw *hw) 2998 { 2999 enum ice_status status; 3000 3001 status = ice_discover_dev_caps(hw, &hw->dev_caps); 3002 if (status) 3003 return status; 3004 3005 return ice_discover_func_caps(hw, &hw->func_caps); 3006 } 3007 3008 /** 3009 * ice_aq_manage_mac_write - manage MAC address write command 3010 * @hw: pointer to the HW struct 3011 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 3012 * @flags: flags to control write behavior 3013 * @cd: pointer to command details structure or NULL 3014 * 3015 * This function is used to write MAC address to the NVM (0x0108). 
3016 */ 3017 enum ice_status 3018 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 3019 struct ice_sq_cd *cd) 3020 { 3021 struct ice_aqc_manage_mac_write *cmd; 3022 struct ice_aq_desc desc; 3023 3024 cmd = &desc.params.mac_write; 3025 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 3026 3027 cmd->flags = flags; 3028 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA); 3029 3030 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3031 } 3032 3033 /** 3034 * ice_aq_clear_pxe_mode 3035 * @hw: pointer to the HW struct 3036 * 3037 * Tell the firmware that the driver is taking over from PXE (0x0110). 3038 */ 3039 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 3040 { 3041 struct ice_aq_desc desc; 3042 3043 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 3044 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 3045 3046 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 3047 } 3048 3049 /** 3050 * ice_clear_pxe_mode - clear pxe operations mode 3051 * @hw: pointer to the HW struct 3052 * 3053 * Make sure all PXE mode settings are cleared, including things 3054 * like descriptor fetch/write-back mode. 3055 */ 3056 void ice_clear_pxe_mode(struct ice_hw *hw) 3057 { 3058 if (ice_check_sq_alive(hw, &hw->adminq)) 3059 ice_aq_clear_pxe_mode(hw); 3060 } 3061 3062 /** 3063 * ice_aq_set_port_params - set physical port parameters. 3064 * @pi: pointer to the port info struct 3065 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded 3066 * @save_bad_pac: if set packets with errors are forwarded to the bad frames VSI 3067 * @pad_short_pac: if set transmit packets smaller than 60 bytes are padded 3068 * @double_vlan: if set double VLAN is enabled 3069 * @cd: pointer to command details structure or NULL 3070 * 3071 * Set Physical port parameters (0x0203) 3072 */ 3073 enum ice_status 3074 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi, 3075 bool save_bad_pac, bool pad_short_pac, bool double_vlan, 3076 struct ice_sq_cd *cd) 3077 { 3078 struct ice_aqc_set_port_params *cmd; 3079 struct ice_hw *hw = pi->hw; 3080 struct ice_aq_desc desc; 3081 u16 cmd_flags = 0; 3082 3083 cmd = &desc.params.set_port_params; 3084 3085 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params); 3086 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi); 3087 if (save_bad_pac) 3088 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS; 3089 if (pad_short_pac) 3090 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS; 3091 if (double_vlan) 3092 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; 3093 cmd->cmd_flags = CPU_TO_LE16(cmd_flags); 3094 3095 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3096 } 3097 3098 /** 3099 * ice_is_100m_speed_supported 3100 * @hw: pointer to the HW struct 3101 * 3102 * returns true if 100M speeds are supported by the device, 3103 * false otherwise. 3104 */ 3105 bool ice_is_100m_speed_supported(struct ice_hw *hw) 3106 { 3107 switch (hw->device_id) { 3108 case ICE_DEV_ID_E822C_SGMII: 3109 case ICE_DEV_ID_E822L_SGMII: 3110 case ICE_DEV_ID_E823L_1GBE: 3111 case ICE_DEV_ID_E823C_SGMII: 3112 return true; 3113 default: 3114 return false; 3115 } 3116 } 3117 3118 /** 3119 * ice_get_link_speed_based_on_phy_type - returns link speed 3120 * @phy_type_low: lower part of phy_type 3121 * @phy_type_high: higher part of phy_type 3122 * 3123 * This helper function will convert an entry in PHY type structure 3124 * [phy_type_low, phy_type_high] to its corresponding link speed. 
3125 * Note: In the structure of [phy_type_low, phy_type_high], there should 3126 * be one bit set, as this function will convert one PHY type to its 3127 * speed. 3128 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3129 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned 3130 */ 3131 static u16 3132 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 3133 { 3134 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3135 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3136 3137 switch (phy_type_low) { 3138 case ICE_PHY_TYPE_LOW_100BASE_TX: 3139 case ICE_PHY_TYPE_LOW_100M_SGMII: 3140 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 3141 break; 3142 case ICE_PHY_TYPE_LOW_1000BASE_T: 3143 case ICE_PHY_TYPE_LOW_1000BASE_SX: 3144 case ICE_PHY_TYPE_LOW_1000BASE_LX: 3145 case ICE_PHY_TYPE_LOW_1000BASE_KX: 3146 case ICE_PHY_TYPE_LOW_1G_SGMII: 3147 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 3148 break; 3149 case ICE_PHY_TYPE_LOW_2500BASE_T: 3150 case ICE_PHY_TYPE_LOW_2500BASE_X: 3151 case ICE_PHY_TYPE_LOW_2500BASE_KX: 3152 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 3153 break; 3154 case ICE_PHY_TYPE_LOW_5GBASE_T: 3155 case ICE_PHY_TYPE_LOW_5GBASE_KR: 3156 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 3157 break; 3158 case ICE_PHY_TYPE_LOW_10GBASE_T: 3159 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 3160 case ICE_PHY_TYPE_LOW_10GBASE_SR: 3161 case ICE_PHY_TYPE_LOW_10GBASE_LR: 3162 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 3163 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 3164 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 3165 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 3166 break; 3167 case ICE_PHY_TYPE_LOW_25GBASE_T: 3168 case ICE_PHY_TYPE_LOW_25GBASE_CR: 3169 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 3170 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 3171 case ICE_PHY_TYPE_LOW_25GBASE_SR: 3172 case ICE_PHY_TYPE_LOW_25GBASE_LR: 3173 case ICE_PHY_TYPE_LOW_25GBASE_KR: 3174 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 3175 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 3176 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 3177 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 3178 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 3179 break; 3180 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 3181 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 3182 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 3183 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 3184 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 3185 case ICE_PHY_TYPE_LOW_40G_XLAUI: 3186 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 3187 break; 3188 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 3189 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 3190 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 3191 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 3192 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 3193 case ICE_PHY_TYPE_LOW_50G_LAUI2: 3194 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 3195 case ICE_PHY_TYPE_LOW_50G_AUI2: 3196 case ICE_PHY_TYPE_LOW_50GBASE_CP: 3197 case ICE_PHY_TYPE_LOW_50GBASE_SR: 3198 case ICE_PHY_TYPE_LOW_50GBASE_FR: 3199 case ICE_PHY_TYPE_LOW_50GBASE_LR: 3200 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 3201 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 3202 case ICE_PHY_TYPE_LOW_50G_AUI1: 3203 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 3204 break; 3205 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 3206 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 3207 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 3208 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 3209 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 3210 case ICE_PHY_TYPE_LOW_100G_CAUI4: 3211 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 3212 case ICE_PHY_TYPE_LOW_100G_AUI4: 3213 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 3214 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 3215 case 
ICE_PHY_TYPE_LOW_100GBASE_CP2: 3216 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 3217 case ICE_PHY_TYPE_LOW_100GBASE_DR: 3218 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 3219 break; 3220 default: 3221 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 3222 break; 3223 } 3224 3225 switch (phy_type_high) { 3226 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 3227 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 3228 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 3229 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 3230 case ICE_PHY_TYPE_HIGH_100G_AUI2: 3231 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 3232 break; 3233 default: 3234 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 3235 break; 3236 } 3237 3238 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 3239 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3240 return ICE_AQ_LINK_SPEED_UNKNOWN; 3241 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3242 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 3243 return ICE_AQ_LINK_SPEED_UNKNOWN; 3244 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 3245 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 3246 return speed_phy_type_low; 3247 else 3248 return speed_phy_type_high; 3249 } 3250 3251 /** 3252 * ice_update_phy_type 3253 * @phy_type_low: pointer to the lower part of phy_type 3254 * @phy_type_high: pointer to the higher part of phy_type 3255 * @link_speeds_bitmap: targeted link speeds bitmap 3256 * 3257 * Note: For the link_speeds_bitmap format, see 3258 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a 3259 * link_speeds_bitmap that includes multiple speeds. 3260 * 3261 * Each entry in the [phy_type_low, phy_type_high] structure 3262 * represents a certain link speed. This helper function will turn on bits 3263 * in [phy_type_low, phy_type_high] structure based on the value of 3264 * link_speeds_bitmap input parameter. 3265 */ 3266 void 3267 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 3268 u16 link_speeds_bitmap) 3269 { 3270 u64 pt_high; 3271 u64 pt_low; 3272 int index; 3273 u16 speed; 3274 3275 /* We first check with low part of phy_type */ 3276 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 3277 pt_low = BIT_ULL(index); 3278 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 3279 3280 if (link_speeds_bitmap & speed) 3281 *phy_type_low |= BIT_ULL(index); 3282 } 3283 3284 /* We then check with high part of phy_type */ 3285 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 3286 pt_high = BIT_ULL(index); 3287 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 3288 3289 if (link_speeds_bitmap & speed) 3290 *phy_type_high |= BIT_ULL(index); 3291 } 3292 } 3293 3294 /** 3295 * ice_aq_set_phy_cfg 3296 * @hw: pointer to the HW struct 3297 * @pi: port info structure of the interested logical port 3298 * @cfg: structure with PHY configuration data to be set 3299 * @cd: pointer to command details structure or NULL 3300 * 3301 * Set the various PHY configuration parameters supported on the Port. 3302 * One or more of the Set PHY config parameters may be ignored in an MFP 3303 * mode as the PF may not have the privilege to set some of the PHY Config 3304 * parameters. This status will be indicated by the command response (0x0601).
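 *
 * Example (illustrative sketch only): callers typically read the current
 * abilities, convert them to a configuration, adjust only the fields of
 * interest and write the result back:
 *
 *	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
 *
 * ice_set_fc() later in this file follows this pattern.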
3305 */ 3306 enum ice_status 3307 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 3308 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 3309 { 3310 struct ice_aq_desc desc; 3311 enum ice_status status; 3312 3313 if (!cfg) 3314 return ICE_ERR_PARAM; 3315 3316 /* Ensure that only valid bits of cfg->caps can be turned on. */ 3317 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 3318 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 3319 cfg->caps); 3320 3321 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 3322 } 3323 3324 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 3325 desc.params.set_phy.lport_num = pi->lport; 3326 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 3327 3328 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 3329 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 3330 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low)); 3331 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 3332 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high)); 3333 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 3334 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 3335 cfg->low_power_ctrl_an); 3336 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 3337 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 3338 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 3339 cfg->link_fec_opt); 3340 3341 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 3342 3343 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 3344 status = ICE_SUCCESS; 3345 3346 if (!status) 3347 pi->phy.curr_user_phy_cfg = *cfg; 3348 3349 return status; 3350 } 3351 3352 /** 3353 * ice_update_link_info - update status of the HW network link 3354 * @pi: port info structure of the interested logical port 3355 */ 3356 enum ice_status ice_update_link_info(struct ice_port_info *pi) 3357 { 3358 struct ice_link_status *li; 3359 enum ice_status status; 3360 3361 if (!pi) 3362 return ICE_ERR_PARAM; 3363 3364 li = &pi->phy.link_info; 3365 3366 status = ice_aq_get_link_info(pi, true, NULL, NULL); 3367 if (status) 3368 return status; 3369 3370 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 3371 struct ice_aqc_get_phy_caps_data *pcaps; 3372 struct ice_hw *hw; 3373 3374 hw = pi->hw; 3375 pcaps = (struct ice_aqc_get_phy_caps_data *) 3376 ice_malloc(hw, sizeof(*pcaps)); 3377 if (!pcaps) 3378 return ICE_ERR_NO_MEMORY; 3379 3380 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3381 pcaps, NULL); 3382 3383 if (status == ICE_SUCCESS) 3384 ice_memcpy(li->module_type, &pcaps->module_type, 3385 sizeof(li->module_type), 3386 ICE_NONDMA_TO_NONDMA); 3387 3388 ice_free(hw, pcaps); 3389 } 3390 3391 return status; 3392 } 3393 3394 /** 3395 * ice_cache_phy_user_req 3396 * @pi: port information structure 3397 * @cache_data: PHY logging data 3398 * @cache_mode: PHY logging mode 3399 * 3400 * Log the user request on (FC, FEC, SPEED) for later use.
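 *
 * Example (illustrative sketch only): caching a flow control request
 * before applying it, as ice_cfg_phy_fc() below does:
 *
 *	struct ice_phy_cache_mode_data cache_data;
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);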
3401 */ 3402 static void 3403 ice_cache_phy_user_req(struct ice_port_info *pi, 3404 struct ice_phy_cache_mode_data cache_data, 3405 enum ice_phy_cache_mode cache_mode) 3406 { 3407 if (!pi) 3408 return; 3409 3410 switch (cache_mode) { 3411 case ICE_FC_MODE: 3412 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 3413 break; 3414 case ICE_SPEED_MODE: 3415 pi->phy.curr_user_speed_req = 3416 cache_data.data.curr_user_speed_req; 3417 break; 3418 case ICE_FEC_MODE: 3419 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 3420 break; 3421 default: 3422 break; 3423 } 3424 } 3425 3426 /** 3427 * ice_caps_to_fc_mode 3428 * @caps: PHY capabilities 3429 * 3430 * Convert PHY FC capabilities to ice FC mode 3431 */ 3432 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 3433 { 3434 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 3435 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3436 return ICE_FC_FULL; 3437 3438 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 3439 return ICE_FC_TX_PAUSE; 3440 3441 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 3442 return ICE_FC_RX_PAUSE; 3443 3444 return ICE_FC_NONE; 3445 } 3446 3447 /** 3448 * ice_caps_to_fec_mode 3449 * @caps: PHY capabilities 3450 * @fec_options: Link FEC options 3451 * 3452 * Convert PHY FEC capabilities to ice FEC mode 3453 */ 3454 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 3455 { 3456 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) { 3457 if (fec_options & ICE_AQC_PHY_FEC_DIS) 3458 return ICE_FEC_DIS_AUTO; 3459 else 3460 return ICE_FEC_AUTO; 3461 } 3462 3463 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3464 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3465 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 3466 ICE_AQC_PHY_FEC_25G_KR_REQ)) 3467 return ICE_FEC_BASER; 3468 3469 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3470 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 3471 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 3472 return ICE_FEC_RS; 3473 3474 return ICE_FEC_NONE; 3475 } 3476 3477 /** 3478 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 3479 * @pi: port information structure 3480 * @cfg: PHY configuration data to set FC mode 3481 * @req_mode: FC mode to configure 3482 */ 3483 static enum ice_status 3484 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3485 enum ice_fc_mode req_mode) 3486 { 3487 struct ice_phy_cache_mode_data cache_data; 3488 u8 pause_mask = 0x0; 3489 3490 if (!pi || !cfg) 3491 return ICE_ERR_BAD_PTR; 3492 switch (req_mode) { 3493 case ICE_FC_AUTO: 3494 { 3495 struct ice_aqc_get_phy_caps_data *pcaps; 3496 enum ice_status status; 3497 3498 pcaps = (struct ice_aqc_get_phy_caps_data *) 3499 ice_malloc(pi->hw, sizeof(*pcaps)); 3500 if (!pcaps) 3501 return ICE_ERR_NO_MEMORY; 3502 /* Query the value of FC that both the NIC and attached media 3503 * can do. 
3504 */ 3505 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 3506 pcaps, NULL); 3507 if (status) { 3508 ice_free(pi->hw, pcaps); 3509 return status; 3510 } 3511 3512 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3513 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3514 3515 ice_free(pi->hw, pcaps); 3516 break; 3517 } 3518 case ICE_FC_FULL: 3519 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3520 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3521 break; 3522 case ICE_FC_RX_PAUSE: 3523 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 3524 break; 3525 case ICE_FC_TX_PAUSE: 3526 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 3527 break; 3528 default: 3529 break; 3530 } 3531 3532 /* clear the old pause settings */ 3533 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 3534 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 3535 3536 /* set the new capabilities */ 3537 cfg->caps |= pause_mask; 3538 3539 /* Cache user FC request */ 3540 cache_data.data.curr_user_fc_req = req_mode; 3541 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 3542 3543 return ICE_SUCCESS; 3544 } 3545 3546 /** 3547 * ice_set_fc 3548 * @pi: port information structure 3549 * @aq_failures: pointer to status code, specific to ice_set_fc routine 3550 * @ena_auto_link_update: enable automatic link update 3551 * 3552 * Set the requested flow control mode. 3553 */ 3554 enum ice_status 3555 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 3556 { 3557 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 3558 struct ice_aqc_get_phy_caps_data *pcaps; 3559 enum ice_status status; 3560 struct ice_hw *hw; 3561 3562 if (!pi || !aq_failures) 3563 return ICE_ERR_BAD_PTR; 3564 3565 *aq_failures = 0; 3566 hw = pi->hw; 3567 3568 pcaps = (struct ice_aqc_get_phy_caps_data *) 3569 ice_malloc(hw, sizeof(*pcaps)); 3570 if (!pcaps) 3571 return ICE_ERR_NO_MEMORY; 3572 3573 /* Get the current PHY config */ 3574 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 3575 pcaps, NULL); 3576 3577 if (status) { 3578 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3579 goto out; 3580 } 3581 3582 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 3583 3584 /* Configure the set PHY data */ 3585 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 3586 if (status) { 3587 if (status != ICE_ERR_BAD_PTR) 3588 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 3589 3590 goto out; 3591 } 3592 3593 /* If the capabilities have changed, then set the new config */ 3594 if (cfg.caps != pcaps->caps) { 3595 int retry_count, retry_max = 10; 3596 3597 /* Auto restart link so settings take effect */ 3598 if (ena_auto_link_update) 3599 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 3600 3601 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 3602 if (status) { 3603 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 3604 goto out; 3605 } 3606 3607 /* Update the link info 3608 * It sometimes takes a really long time for link to 3609 * come back from the atomic reset. Thus, we wait a 3610 * little bit. 
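 * With the defaults below (retry_max = 10 and a 100 ms delay per
 * attempt), this bounds the total wait at roughly one second.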
3611 */
3612 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3613 status = ice_update_link_info(pi);
3614
3615 if (status == ICE_SUCCESS)
3616 break;
3617
3618 ice_msec_delay(100, true);
3619 }
3620
3621 if (status)
3622 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3623 }
3624
3625 out:
3626 ice_free(hw, pcaps);
3627 return status;
3628 }
3629
3630 /**
3631 * ice_phy_caps_equals_cfg
3632 * @phy_caps: PHY capabilities
3633 * @phy_cfg: PHY configuration
3634 *
3635 * Helper function to determine if PHY capabilities match PHY
3636 * configuration
3637 */
3638 bool
3639 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3640 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3641 {
3642 u8 caps_mask, cfg_mask;
3643
3644 if (!phy_caps || !phy_cfg)
3645 return false;
3646
3647 /* These bits are not common between capabilities and configuration.
3648 * Do not use them to determine equality.
3649 */
3650 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3651 ICE_AQC_PHY_EN_MOD_QUAL);
3652 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3653
3654 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3655 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3656 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3657 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3658 phy_caps->eee_cap != phy_cfg->eee_cap ||
3659 phy_caps->eeer_value != phy_cfg->eeer_value ||
3660 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3661 return false;
3662
3663 return true;
3664 }
3665
3666 /**
3667 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3668 * @pi: port information structure
3669 * @caps: PHY ability structure to copy data from
3670 * @cfg: PHY configuration structure to copy data to
3671 *
3672 * Helper function to copy AQC PHY get ability data to PHY set configuration
3673 * data structure
3674 */
3675 void
3676 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3677 struct ice_aqc_get_phy_caps_data *caps,
3678 struct ice_aqc_set_phy_cfg_data *cfg)
3679 {
3680 if (!pi || !caps || !cfg)
3681 return;
3682
3683 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3684 cfg->phy_type_low = caps->phy_type_low;
3685 cfg->phy_type_high = caps->phy_type_high;
3686 cfg->caps = caps->caps;
3687 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3688 cfg->eee_cap = caps->eee_cap;
3689 cfg->eeer_value = caps->eeer_value;
3690 cfg->link_fec_opt = caps->link_fec_options;
3691 cfg->module_compliance_enforcement =
3692 caps->module_compliance_enforcement;
3693 }
3694
3695 /**
3696 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3697 * @pi: port information structure
3698 * @cfg: PHY configuration data to set FEC mode
3699 * @fec: FEC mode to configure
3700 */
3701 enum ice_status
3702 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3703 enum ice_fec_mode fec)
3704 {
3705 struct ice_aqc_get_phy_caps_data *pcaps;
3706 enum ice_status status = ICE_SUCCESS;
3707 struct ice_hw *hw;
3708
3709 if (!pi || !cfg)
3710 return ICE_ERR_BAD_PTR;
3711
3712 hw = pi->hw;
3713
3714 pcaps = (struct ice_aqc_get_phy_caps_data *)
3715 ice_malloc(hw, sizeof(*pcaps));
3716 if (!pcaps)
3717 return ICE_ERR_NO_MEMORY;
3718
3719 status = ice_aq_get_phy_caps(pi, false,
3720 (ice_fw_supports_report_dflt_cfg(hw) ?
3721 ICE_AQC_REPORT_DFLT_CFG :
3722 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
3723
3724 if (status)
3725 goto out;
3726
3727 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3728 cfg->link_fec_opt = pcaps->link_fec_options;
3729
3730 switch (fec) {
3731 case ICE_FEC_BASER:
3732 /* Clear RS bits; AND in the BASE-R ability
3733 * bits and OR in the request bits.
3734 */
3735 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3736 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3737 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3738 ICE_AQC_PHY_FEC_25G_KR_REQ;
3739 break;
3740 case ICE_FEC_RS:
3741 /* Clear BASE-R bits; AND in the RS ability
3742 * bits and OR in the request bits.
3743 */
3744 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3745 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3746 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3747 break;
3748 case ICE_FEC_NONE:
3749 /* Clear all FEC option bits. */
3750 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3751 break;
3752 case ICE_FEC_DIS_AUTO:
3753 /* Set No FEC and auto FEC */
3754 if (!ice_fw_supports_fec_dis_auto(hw)) {
3755 status = ICE_ERR_NOT_SUPPORTED;
3756 goto out;
3757 }
3758 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
3759 /* fall-through */
3760 case ICE_FEC_AUTO:
3761 /* AND in the auto FEC bit and all caps bits. */
3762 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3763 cfg->link_fec_opt |= pcaps->link_fec_options;
3764 break;
3765 default:
3766 status = ICE_ERR_PARAM;
3767 break;
3768 }
3769
3770 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
3771 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
3772 struct ice_link_default_override_tlv tlv;
3773
3774 if (ice_get_link_default_override(&tlv, pi))
3775 goto out;
3776
3777 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3778 (tlv.options & ICE_LINK_OVERRIDE_EN))
3779 cfg->link_fec_opt = tlv.fec_options;
3780 }
3781
3782 out:
3783 ice_free(hw, pcaps);
3784
3785 return status;
3786 }
3787
3788 /**
3789 * ice_get_link_status - get status of the HW network link
3790 * @pi: port information structure
3791 * @link_up: pointer to bool (true/false = linkup/linkdown)
3792 *
3793 * The variable link_up is true if the link is up and false if it is down;
3794 * its value is invalid if the returned status is non-zero. As a
3795 * result of this call, link status reporting becomes enabled.
3796 */
3797 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3798 {
3799 struct ice_phy_info *phy_info;
3800 enum ice_status status = ICE_SUCCESS;
3801
3802 if (!pi || !link_up)
3803 return ICE_ERR_PARAM;
3804
3805 phy_info = &pi->phy;
3806
3807 if (phy_info->get_link_info) {
3808 status = ice_update_link_info(pi);
3809
3810 if (status)
3811 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3812 status);
3813 }
3814
3815 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3816
3817 return status;
3818 }
3819
3820 /**
3821 * ice_aq_set_link_restart_an
3822 * @pi: pointer to the port information structure
3823 * @ena_link: if true: enable link, if false: disable link
3824 * @cd: pointer to command details structure or NULL
3825 *
3826 * Sets up the link and restarts Auto-Negotiation over the link.
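 *
 * Example (hypothetical caller), forcing the link up with a fresh
 * auto-negotiation cycle:
 *
 *	if (ice_aq_set_link_restart_an(pi, true, NULL))
 *		ice_debug(pi->hw, ICE_DBG_LINK, "restart AN failed\n");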
3827 */ 3828 enum ice_status 3829 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3830 struct ice_sq_cd *cd) 3831 { 3832 enum ice_status status = ICE_ERR_AQ_ERROR; 3833 struct ice_aqc_restart_an *cmd; 3834 struct ice_aq_desc desc; 3835 3836 cmd = &desc.params.restart_an; 3837 3838 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3839 3840 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3841 cmd->lport_num = pi->lport; 3842 if (ena_link) 3843 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3844 else 3845 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3846 3847 status = ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3848 if (status) 3849 return status; 3850 3851 if (ena_link) 3852 pi->phy.curr_user_phy_cfg.caps |= ICE_AQC_PHY_EN_LINK; 3853 else 3854 pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK; 3855 3856 return ICE_SUCCESS; 3857 } 3858 3859 /** 3860 * ice_aq_set_event_mask 3861 * @hw: pointer to the HW struct 3862 * @port_num: port number of the physical function 3863 * @mask: event mask to be set 3864 * @cd: pointer to command details structure or NULL 3865 * 3866 * Set event mask (0x0613) 3867 */ 3868 enum ice_status 3869 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3870 struct ice_sq_cd *cd) 3871 { 3872 struct ice_aqc_set_event_mask *cmd; 3873 struct ice_aq_desc desc; 3874 3875 cmd = &desc.params.set_event_mask; 3876 3877 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3878 3879 cmd->lport_num = port_num; 3880 3881 cmd->event_mask = CPU_TO_LE16(mask); 3882 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3883 } 3884 3885 /** 3886 * ice_aq_set_mac_loopback 3887 * @hw: pointer to the HW struct 3888 * @ena_lpbk: Enable or Disable loopback 3889 * @cd: pointer to command details structure or NULL 3890 * 3891 * Enable/disable loopback on a given port 3892 */ 3893 enum ice_status 3894 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3895 { 3896 struct ice_aqc_set_mac_lb *cmd; 3897 struct ice_aq_desc desc; 3898 3899 cmd = &desc.params.set_mac_lb; 3900 3901 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3902 if (ena_lpbk) 3903 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3904 3905 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3906 } 3907 3908 /** 3909 * ice_aq_set_port_id_led 3910 * @pi: pointer to the port information 3911 * @is_orig_mode: is this LED set to original mode (by the net-list) 3912 * @cd: pointer to command details structure or NULL 3913 * 3914 * Set LED value for the given port (0x06e9) 3915 */ 3916 enum ice_status 3917 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3918 struct ice_sq_cd *cd) 3919 { 3920 struct ice_aqc_set_port_id_led *cmd; 3921 struct ice_hw *hw = pi->hw; 3922 struct ice_aq_desc desc; 3923 3924 cmd = &desc.params.set_port_id_led; 3925 3926 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3927 3928 if (is_orig_mode) 3929 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3930 else 3931 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3932 3933 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3934 } 3935 3936 /** 3937 * ice_aq_sff_eeprom 3938 * @hw: pointer to the HW struct 3939 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3940 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3941 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 
3942 * @page: QSFP page
3943 * @set_page: set or ignore the page
3944 * @data: pointer to data buffer to be read/written to the I2C device.
3945 * @length: 1-16 for read, 1 for write.
3946 * @write: 0 for read, 1 for write.
3947 * @cd: pointer to command details structure or NULL
3948 *
3949 * Read/Write SFF EEPROM (0x06EE)
3950 */
3951 enum ice_status
3952 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3953 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3954 bool write, struct ice_sq_cd *cd)
3955 {
3956 struct ice_aqc_sff_eeprom *cmd;
3957 struct ice_aq_desc desc;
3958 enum ice_status status;
3959
3960 if (!data || (mem_addr & 0xff00))
3961 return ICE_ERR_PARAM;
3962
3963 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3964 cmd = &desc.params.read_write_sff_param;
3965 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3966 cmd->lport_num = (u8)(lport & 0xff);
3967 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3968 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3969 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3970 ((set_page <<
3971 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3972 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3973 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3974 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3975 if (write)
3976 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3977
3978 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
3979 return status;
3980 }
3981
3982 /**
3983 * ice_aq_prog_topo_dev_nvm
3984 * @hw: pointer to the hardware structure
3985 * @topo_params: pointer to structure storing topology parameters for a device
3986 * @cd: pointer to command details structure or NULL
3987 *
3988 * Program Topology Device NVM (0x06F2)
3989 *
3990 */
3991 enum ice_status
3992 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
3993 struct ice_aqc_link_topo_params *topo_params,
3994 struct ice_sq_cd *cd)
3995 {
3996 struct ice_aqc_prog_topo_dev_nvm *cmd;
3997 struct ice_aq_desc desc;
3998
3999 cmd = &desc.params.prog_topo_dev_nvm;
4000
4001 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_prog_topo_dev_nvm);
4002
4003 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4004 ICE_NONDMA_TO_NONDMA);
4005
4006 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4007 }
4008
4009 /**
4010 * ice_aq_read_topo_dev_nvm
4011 * @hw: pointer to the hardware structure
4012 * @topo_params: pointer to structure storing topology parameters for a device
4013 * @start_address: byte offset in the topology device NVM
4014 * @data: pointer to data buffer
4015 * @data_size: number of bytes to be read from the topology device NVM
4016 * @cd: pointer to command details structure or NULL
4017 *
4018 * Read Topology Device NVM (0x06F3)
4019 */
4020 enum ice_status
4021 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
4022 struct ice_aqc_link_topo_params *topo_params,
4023 u32 start_address, u8 *data, u8 data_size,
4024 struct ice_sq_cd *cd)
4025 {
4026 struct ice_aqc_read_topo_dev_nvm *cmd;
4027 struct ice_aq_desc desc;
4028 enum ice_status status;
4029
4030 if (!data || data_size == 0 ||
4031 data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
4032 return ICE_ERR_PARAM;
4033
4034 cmd = &desc.params.read_topo_dev_nvm;
4035
4036 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_topo_dev_nvm);
4037
4038 desc.datalen = CPU_TO_LE16(data_size);
4039 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4040 ICE_NONDMA_TO_NONDMA);
4041 cmd->start_address = CPU_TO_LE32(start_address);
4042
4043 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4044 if
(status) 4045 return status; 4046 4047 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA); 4048 4049 return ICE_SUCCESS; 4050 } 4051 4052 static u16 ice_lut_type_to_size(u16 lut_type) 4053 { 4054 switch (lut_type) { 4055 case ICE_LUT_VSI: 4056 return ICE_LUT_VSI_SIZE; 4057 case ICE_LUT_GLOBAL: 4058 return ICE_LUT_GLOBAL_SIZE; 4059 case ICE_LUT_PF: 4060 return ICE_LUT_PF_SIZE; 4061 default: 4062 return 0; 4063 } 4064 } 4065 4066 static u16 ice_lut_size_to_flag(u16 lut_size) 4067 { 4068 u16 f = 0; 4069 4070 switch (lut_size) { 4071 case ICE_LUT_GLOBAL_SIZE: 4072 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG; 4073 break; 4074 case ICE_LUT_PF_SIZE: 4075 f = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG; 4076 break; 4077 default: 4078 break; 4079 } 4080 return f << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S; 4081 } 4082 4083 int ice_lut_size_to_type(int lut_size) 4084 { 4085 switch (lut_size) { 4086 case ICE_LUT_VSI_SIZE: 4087 return ICE_LUT_VSI; 4088 case ICE_LUT_GLOBAL_SIZE: 4089 return ICE_LUT_GLOBAL; 4090 case ICE_LUT_PF_SIZE: 4091 return ICE_LUT_PF; 4092 default: 4093 return -1; 4094 } 4095 } 4096 4097 /** 4098 * __ice_aq_get_set_rss_lut 4099 * @hw: pointer to the hardware structure 4100 * @params: RSS LUT parameters 4101 * @set: set true to set the table, false to get the table 4102 * 4103 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 4104 */ 4105 static enum ice_status 4106 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) 4107 { 4108 u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle; 4109 struct ice_aqc_get_set_rss_lut *cmd_resp; 4110 struct ice_aq_desc desc; 4111 enum ice_status status; 4112 u8 *lut; 4113 4114 if (!params) 4115 return ICE_ERR_PARAM; 4116 4117 vsi_handle = params->vsi_handle; 4118 lut = params->lut; 4119 lut_type = params->lut_type; 4120 lut_size = ice_lut_type_to_size(lut_type); 4121 cmd_resp = &desc.params.get_set_rss_lut; 4122 if (lut_type == ICE_LUT_GLOBAL) 4123 glob_lut_idx = params->global_lut_id; 4124 4125 if (!lut || !lut_size || !ice_is_vsi_valid(hw, vsi_handle)) 4126 return ICE_ERR_PARAM; 4127 4128 if (lut_size > params->lut_size) 4129 return ICE_ERR_INVAL_SIZE; 4130 4131 if (set && lut_size != params->lut_size) 4132 return ICE_ERR_PARAM; 4133 4134 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 4135 4136 if (set) { 4137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 4138 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4139 } else { 4140 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 4141 } 4142 4143 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id << 4144 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 4145 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 4146 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 4147 4148 flags = ice_lut_size_to_flag(lut_size) | 4149 ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 4150 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M) | 4151 ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 4152 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 4153 4154 cmd_resp->flags = CPU_TO_LE16(flags); 4155 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 4156 params->lut_size = LE16_TO_CPU(desc.datalen); 4157 return status; 4158 } 4159 4160 /** 4161 * ice_aq_get_rss_lut 4162 * @hw: pointer to the hardware structure 4163 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 4164 * 4165 * get the RSS lookup table, PF or VSI type 4166 */ 4167 enum ice_status 4168 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 4169 { 4170 return 
__ice_aq_get_set_rss_lut(hw, get_params, false);
4171 }
4172
4173 /**
4174 * ice_aq_set_rss_lut
4175 * @hw: pointer to the hardware structure
4176 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
4177 *
4178 * set the RSS lookup table, PF or VSI type
4179 */
4180 enum ice_status
4181 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4182 {
4183 return __ice_aq_get_set_rss_lut(hw, set_params, true);
4184 }
4185
4186 /**
4187 * __ice_aq_get_set_rss_key
4188 * @hw: pointer to the HW struct
4189 * @vsi_id: VSI FW index
4190 * @key: pointer to key info struct
4191 * @set: set true to set the key, false to get the key
4192 *
4193 * get (0x0B04) or set (0x0B02) the RSS key per VSI
4194 */
4195 static enum ice_status
4196 __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4197 struct ice_aqc_get_set_rss_keys *key,
4198 bool set)
4199 {
4200 struct ice_aqc_get_set_rss_key *cmd_resp;
4201 u16 key_size = sizeof(*key);
4202 struct ice_aq_desc desc;
4203
4204 cmd_resp = &desc.params.get_set_rss_key;
4205
4206 if (set) {
4207 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
4208 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
4209 } else {
4210 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
4211 }
4212
4213 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4214 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
4215 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
4216 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
4217
4218 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4219 }
4220
4221 /**
4222 * ice_aq_get_rss_key
4223 * @hw: pointer to the HW struct
4224 * @vsi_handle: software VSI handle
4225 * @key: pointer to key info struct
4226 *
4227 * get the RSS key per VSI
4228 */
4229 enum ice_status
4230 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4231 struct ice_aqc_get_set_rss_keys *key)
4232 {
4233 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4234 return ICE_ERR_PARAM;
4235
4236 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4237 key, false);
4238 }
4239
4240 /**
4241 * ice_aq_set_rss_key
4242 * @hw: pointer to the HW struct
4243 * @vsi_handle: software VSI handle
4244 * @keys: pointer to key info struct
4245 *
4246 * set the RSS key per VSI
4247 */
4248 enum ice_status
4249 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4250 struct ice_aqc_get_set_rss_keys *keys)
4251 {
4252 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4253 return ICE_ERR_PARAM;
4254
4255 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4256 keys, true);
4257 }
4258
4259 /**
4260 * ice_aq_add_lan_txq
4261 * @hw: pointer to the hardware structure
4262 * @num_qgrps: Number of added queue groups
4263 * @qg_list: list of queue groups to be added
4264 * @buf_size: size of buffer for indirect command
4265 * @cd: pointer to command details structure or NULL
4266 *
4267 * Add Tx LAN queue (0x0C30)
4268 *
4269 * NOTE:
4270 * Prior to calling add Tx LAN queue:
4271 * Initialize the following as part of the Tx queue context:
4272 * Completion queue ID if the queue uses a Completion queue, Quanta profile,
4273 * Cache profile, and Packet shaper profile.
4274 *
4275 * After the add Tx LAN queue AQ command completes:
4276 * Interrupts should be associated with specific queues;
4277 * association of a Tx queue to a Doorbell queue is not part of the Add LAN
4278 * Tx queue flow.
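 *
 * The indirect buffer is sized with ice_struct_size(); a minimal sketch
 * for one group carrying a single queue (hypothetical caller):
 *
 *	u16 buf_size = ice_struct_size(qg_list, txqs, 1);
 *
 * buf_size must match the sum this function computes from each group's
 * num_txqs, otherwise ICE_ERR_PARAM is returned.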
4279 */ 4280 enum ice_status 4281 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4282 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 4283 struct ice_sq_cd *cd) 4284 { 4285 struct ice_aqc_add_tx_qgrp *list; 4286 struct ice_aqc_add_txqs *cmd; 4287 struct ice_aq_desc desc; 4288 u16 i, sum_size = 0; 4289 4290 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 4291 4292 cmd = &desc.params.add_txqs; 4293 4294 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 4295 4296 if (!qg_list) 4297 return ICE_ERR_PARAM; 4298 4299 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4300 return ICE_ERR_PARAM; 4301 4302 for (i = 0, list = qg_list; i < num_qgrps; i++) { 4303 sum_size += ice_struct_size(list, txqs, list->num_txqs); 4304 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 4305 list->num_txqs); 4306 } 4307 4308 if (buf_size != sum_size) 4309 return ICE_ERR_PARAM; 4310 4311 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4312 4313 cmd->num_qgrps = num_qgrps; 4314 4315 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4316 } 4317 4318 /** 4319 * ice_aq_dis_lan_txq 4320 * @hw: pointer to the hardware structure 4321 * @num_qgrps: number of groups in the list 4322 * @qg_list: the list of groups to disable 4323 * @buf_size: the total size of the qg_list buffer in bytes 4324 * @rst_src: if called due to reset, specifies the reset source 4325 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4326 * @cd: pointer to command details structure or NULL 4327 * 4328 * Disable LAN Tx queue (0x0C31) 4329 */ 4330 static enum ice_status 4331 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 4332 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 4333 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4334 struct ice_sq_cd *cd) 4335 { 4336 struct ice_aqc_dis_txq_item *item; 4337 struct ice_aqc_dis_txqs *cmd; 4338 struct ice_aq_desc desc; 4339 enum ice_status status; 4340 u16 i, sz = 0; 4341 4342 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 4343 cmd = &desc.params.dis_txqs; 4344 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 4345 4346 /* qg_list can be NULL only in VM/VF reset flow */ 4347 if (!qg_list && !rst_src) 4348 return ICE_ERR_PARAM; 4349 4350 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 4351 return ICE_ERR_PARAM; 4352 4353 cmd->num_entries = num_qgrps; 4354 4355 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 4356 ICE_AQC_Q_DIS_TIMEOUT_M); 4357 4358 switch (rst_src) { 4359 case ICE_VM_RESET: 4360 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 4361 cmd->vmvf_and_timeout |= 4362 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 4363 break; 4364 case ICE_VF_RESET: 4365 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 4366 /* In this case, FW expects vmvf_num to be absolute VF ID */ 4367 cmd->vmvf_and_timeout |= 4368 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) & 4369 ICE_AQC_Q_DIS_VMVF_NUM_M); 4370 break; 4371 case ICE_NO_RESET: 4372 default: 4373 break; 4374 } 4375 4376 /* flush pipe on time out */ 4377 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 4378 /* If no queue group info, we are in a reset flow. 
Issue the AQ */ 4379 if (!qg_list) 4380 goto do_aq; 4381 4382 /* set RD bit to indicate that command buffer is provided by the driver 4383 * and it needs to be read by the firmware 4384 */ 4385 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4386 4387 for (i = 0, item = qg_list; i < num_qgrps; i++) { 4388 u16 item_size = ice_struct_size(item, q_id, item->num_qs); 4389 4390 /* If the num of queues is even, add 2 bytes of padding */ 4391 if ((item->num_qs % 2) == 0) 4392 item_size += 2; 4393 4394 sz += item_size; 4395 4396 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 4397 } 4398 4399 if (buf_size != sz) 4400 return ICE_ERR_PARAM; 4401 4402 do_aq: 4403 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 4404 if (status) { 4405 if (!qg_list) 4406 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 4407 vmvf_num, hw->adminq.sq_last_status); 4408 else 4409 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 4410 LE16_TO_CPU(qg_list[0].q_id[0]), 4411 hw->adminq.sq_last_status); 4412 } 4413 return status; 4414 } 4415 4416 /** 4417 * ice_aq_move_recfg_lan_txq 4418 * @hw: pointer to the hardware structure 4419 * @num_qs: number of queues to move/reconfigure 4420 * @is_move: true if this operation involves node movement 4421 * @is_tc_change: true if this operation involves a TC change 4422 * @subseq_call: true if this operation is a subsequent call 4423 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN 4424 * @timeout: timeout in units of 100 usec (valid values 0-50) 4425 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN 4426 * @buf: struct containing src/dest TEID and per-queue info 4427 * @buf_size: size of buffer for indirect command 4428 * @txqs_moved: out param, number of queues successfully moved 4429 * @cd: pointer to command details structure or NULL 4430 * 4431 * Move / Reconfigure Tx LAN queues (0x0C32) 4432 */ 4433 enum ice_status 4434 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move, 4435 bool is_tc_change, bool subseq_call, bool flush_pipe, 4436 u8 timeout, u32 *blocked_cgds, 4437 struct ice_aqc_move_txqs_data *buf, u16 buf_size, 4438 u8 *txqs_moved, struct ice_sq_cd *cd) 4439 { 4440 struct ice_aqc_move_txqs *cmd; 4441 struct ice_aq_desc desc; 4442 enum ice_status status; 4443 4444 cmd = &desc.params.move_txqs; 4445 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs); 4446 4447 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50 4448 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX) 4449 return ICE_ERR_PARAM; 4450 4451 if (is_tc_change && !flush_pipe && !blocked_cgds) 4452 return ICE_ERR_PARAM; 4453 4454 if (!is_move && !is_tc_change) 4455 return ICE_ERR_PARAM; 4456 4457 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4458 4459 if (is_move) 4460 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE; 4461 4462 if (is_tc_change) 4463 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE; 4464 4465 if (subseq_call) 4466 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL; 4467 4468 if (flush_pipe) 4469 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE; 4470 4471 cmd->num_qs = num_qs; 4472 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) & 4473 ICE_AQC_Q_CMD_TIMEOUT_M); 4474 4475 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4476 4477 if (!status && txqs_moved) 4478 *txqs_moved = cmd->num_qs; 4479 4480 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN && 4481 is_tc_change && !flush_pipe) 4482 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds); 4483 4484 return status; 4485 } 4486 4487 /** 4488 * ice_aq_add_rdma_qsets 4489 * 
@hw: pointer to the hardware structure 4490 * @num_qset_grps: Number of RDMA Qset groups 4491 * @qset_list: list of qset groups to be added 4492 * @buf_size: size of buffer for indirect command 4493 * @cd: pointer to command details structure or NULL 4494 * 4495 * Add Tx RDMA Qsets (0x0C33) 4496 */ 4497 enum ice_status 4498 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 4499 struct ice_aqc_add_rdma_qset_data *qset_list, 4500 u16 buf_size, struct ice_sq_cd *cd) 4501 { 4502 struct ice_aqc_add_rdma_qset_data *list; 4503 struct ice_aqc_add_rdma_qset *cmd; 4504 struct ice_aq_desc desc; 4505 u16 i, sum_size = 0; 4506 4507 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__); 4508 4509 cmd = &desc.params.add_rdma_qset; 4510 4511 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 4512 4513 if (!qset_list) 4514 return ICE_ERR_PARAM; 4515 4516 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 4517 return ICE_ERR_PARAM; 4518 4519 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 4520 u16 num_qsets = LE16_TO_CPU(list->num_qsets); 4521 4522 sum_size += ice_struct_size(list, rdma_qsets, num_qsets); 4523 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 4524 num_qsets); 4525 } 4526 4527 if (buf_size != sum_size) 4528 return ICE_ERR_PARAM; 4529 4530 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD); 4531 4532 cmd->num_qset_grps = num_qset_grps; 4533 4534 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd); 4535 } 4536 4537 /* End of FW Admin Queue command wrappers */ 4538 4539 /** 4540 * ice_write_byte - write a byte to a packed context structure 4541 * @src_ctx: the context structure to read from 4542 * @dest_ctx: the context to be written to 4543 * @ce_info: a description of the struct to be filled 4544 */ 4545 static void 4546 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4547 { 4548 u8 src_byte, dest_byte, mask; 4549 u8 *from, *dest; 4550 u16 shift_width; 4551 4552 /* copy from the next struct field */ 4553 from = src_ctx + ce_info->offset; 4554 4555 /* prepare the bits and mask */ 4556 shift_width = ce_info->lsb % 8; 4557 mask = (u8)(BIT(ce_info->width) - 1); 4558 4559 src_byte = *from; 4560 src_byte &= mask; 4561 4562 /* shift to correct alignment */ 4563 mask <<= shift_width; 4564 src_byte <<= shift_width; 4565 4566 /* get the current bits from the target bit string */ 4567 dest = dest_ctx + (ce_info->lsb / 8); 4568 4569 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA); 4570 4571 dest_byte &= ~mask; /* get the bits not changing */ 4572 dest_byte |= src_byte; /* add in the new bits */ 4573 4574 /* put it all back */ 4575 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); 4576 } 4577 4578 /** 4579 * ice_write_word - write a word to a packed context structure 4580 * @src_ctx: the context structure to read from 4581 * @dest_ctx: the context to be written to 4582 * @ce_info: a description of the struct to be filled 4583 */ 4584 static void 4585 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4586 { 4587 u16 src_word, mask; 4588 __le16 dest_word; 4589 u8 *from, *dest; 4590 u16 shift_width; 4591 4592 /* copy from the next struct field */ 4593 from = src_ctx + ce_info->offset; 4594 4595 /* prepare the bits and mask */ 4596 shift_width = ce_info->lsb % 8; 4597 mask = BIT(ce_info->width) - 1; 4598 4599 /* don't swizzle the bits until after the mask because the mask bits 4600 * will be in a different bit position on big endian machines 4601 */ 4602 src_word = *(u16 *)from; 4603 
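	/* worked example (illustrative): for a field with width = 5 and
	 * lsb = 10, mask = 0x001f and shift_width = 2; after the shifts
	 * below, the field occupies bits 2-6 of the word written at byte
	 * offset lsb / 8 = 1 of the destination context.
	 */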
src_word &= mask; 4604 4605 /* shift to correct alignment */ 4606 mask <<= shift_width; 4607 src_word <<= shift_width; 4608 4609 /* get the current bits from the target bit string */ 4610 dest = dest_ctx + (ce_info->lsb / 8); 4611 4612 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA); 4613 4614 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */ 4615 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */ 4616 4617 /* put it all back */ 4618 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); 4619 } 4620 4621 /** 4622 * ice_write_dword - write a dword to a packed context structure 4623 * @src_ctx: the context structure to read from 4624 * @dest_ctx: the context to be written to 4625 * @ce_info: a description of the struct to be filled 4626 */ 4627 static void 4628 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4629 { 4630 u32 src_dword, mask; 4631 __le32 dest_dword; 4632 u8 *from, *dest; 4633 u16 shift_width; 4634 4635 /* copy from the next struct field */ 4636 from = src_ctx + ce_info->offset; 4637 4638 /* prepare the bits and mask */ 4639 shift_width = ce_info->lsb % 8; 4640 4641 /* if the field width is exactly 32 on an x86 machine, then the shift 4642 * operation will not work because the SHL instructions count is masked 4643 * to 5 bits so the shift will do nothing 4644 */ 4645 if (ce_info->width < 32) 4646 mask = BIT(ce_info->width) - 1; 4647 else 4648 mask = (u32)~0; 4649 4650 /* don't swizzle the bits until after the mask because the mask bits 4651 * will be in a different bit position on big endian machines 4652 */ 4653 src_dword = *(u32 *)from; 4654 src_dword &= mask; 4655 4656 /* shift to correct alignment */ 4657 mask <<= shift_width; 4658 src_dword <<= shift_width; 4659 4660 /* get the current bits from the target bit string */ 4661 dest = dest_ctx + (ce_info->lsb / 8); 4662 4663 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA); 4664 4665 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */ 4666 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */ 4667 4668 /* put it all back */ 4669 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); 4670 } 4671 4672 /** 4673 * ice_write_qword - write a qword to a packed context structure 4674 * @src_ctx: the context structure to read from 4675 * @dest_ctx: the context to be written to 4676 * @ce_info: a description of the struct to be filled 4677 */ 4678 static void 4679 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4680 { 4681 u64 src_qword, mask; 4682 __le64 dest_qword; 4683 u8 *from, *dest; 4684 u16 shift_width; 4685 4686 /* copy from the next struct field */ 4687 from = src_ctx + ce_info->offset; 4688 4689 /* prepare the bits and mask */ 4690 shift_width = ce_info->lsb % 8; 4691 4692 /* if the field width is exactly 64 on an x86 machine, then the shift 4693 * operation will not work because the SHL instructions count is masked 4694 * to 6 bits so the shift will do nothing 4695 */ 4696 if (ce_info->width < 64) 4697 mask = BIT_ULL(ce_info->width) - 1; 4698 else 4699 mask = (u64)~0; 4700 4701 /* don't swizzle the bits until after the mask because the mask bits 4702 * will be in a different bit position on big endian machines 4703 */ 4704 src_qword = *(u64 *)from; 4705 src_qword &= mask; 4706 4707 /* shift to correct alignment */ 4708 mask <<= shift_width; 4709 src_qword <<= shift_width; 4710 4711 /* get the current bits from the target bit string */ 4712 dest = 
dest_ctx + (ce_info->lsb / 8); 4713 4714 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA); 4715 4716 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */ 4717 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */ 4718 4719 /* put it all back */ 4720 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); 4721 } 4722 4723 /** 4724 * ice_set_ctx - set context bits in packed structure 4725 * @hw: pointer to the hardware structure 4726 * @src_ctx: pointer to a generic non-packed context structure 4727 * @dest_ctx: pointer to memory for the packed structure 4728 * @ce_info: a description of the structure to be transformed 4729 */ 4730 enum ice_status 4731 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4732 const struct ice_ctx_ele *ce_info) 4733 { 4734 int f; 4735 4736 for (f = 0; ce_info[f].width; f++) { 4737 /* We have to deal with each element of the FW response 4738 * using the correct size so that we are correct regardless 4739 * of the endianness of the machine. 4740 */ 4741 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4742 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n", 4743 f, ce_info[f].width, ce_info[f].size_of); 4744 continue; 4745 } 4746 switch (ce_info[f].size_of) { 4747 case sizeof(u8): 4748 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4749 break; 4750 case sizeof(u16): 4751 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4752 break; 4753 case sizeof(u32): 4754 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4755 break; 4756 case sizeof(u64): 4757 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4758 break; 4759 default: 4760 return ICE_ERR_INVAL_SIZE; 4761 } 4762 } 4763 4764 return ICE_SUCCESS; 4765 } 4766 4767 /** 4768 * ice_aq_get_internal_data 4769 * @hw: pointer to the hardware structure 4770 * @cluster_id: specific cluster to dump 4771 * @table_id: table ID within cluster 4772 * @start: index of line in the block to read 4773 * @buf: dump buffer 4774 * @buf_size: dump buffer size 4775 * @ret_buf_size: return buffer size (returned by FW) 4776 * @ret_next_table: next block to read (returned by FW) 4777 * @ret_next_index: next index to read (returned by FW) 4778 * @cd: pointer to command details structure 4779 * 4780 * Get internal FW/HW data (0xFF08) for debug purposes. 
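 *
 * Example (hypothetical debug caller; the cluster ID and buffer size
 * are illustrative only):
 *
 *	u16 read_size, next_table;
 *	u32 next_index;
 *	u8 buf[256];
 *
 *	status = ice_aq_get_internal_data(hw, 0, 0, 0, buf, sizeof(buf),
 *					  &read_size, &next_table,
 *					  &next_index, NULL);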
4781 */ 4782 enum ice_status 4783 ice_aq_get_internal_data(struct ice_hw *hw, u8 cluster_id, u16 table_id, 4784 u32 start, void *buf, u16 buf_size, u16 *ret_buf_size, 4785 u16 *ret_next_table, u32 *ret_next_index, 4786 struct ice_sq_cd *cd) 4787 { 4788 struct ice_aqc_debug_dump_internals *cmd; 4789 struct ice_aq_desc desc; 4790 enum ice_status status; 4791 4792 cmd = &desc.params.debug_dump; 4793 4794 if (buf_size == 0 || !buf) 4795 return ICE_ERR_PARAM; 4796 4797 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_debug_dump_internals); 4798 4799 cmd->cluster_id = cluster_id; 4800 cmd->table_id = CPU_TO_LE16(table_id); 4801 cmd->idx = CPU_TO_LE32(start); 4802 4803 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4804 4805 if (!status) { 4806 if (ret_buf_size) 4807 *ret_buf_size = LE16_TO_CPU(desc.datalen); 4808 if (ret_next_table) 4809 *ret_next_table = LE16_TO_CPU(cmd->table_id); 4810 if (ret_next_index) 4811 *ret_next_index = LE32_TO_CPU(cmd->idx); 4812 } 4813 4814 return status; 4815 } 4816 4817 /** 4818 * ice_read_byte - read context byte into struct 4819 * @src_ctx: the context structure to read from 4820 * @dest_ctx: the context to be written to 4821 * @ce_info: a description of the struct to be filled 4822 */ 4823 static void 4824 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4825 { 4826 u8 dest_byte, mask; 4827 u8 *src, *target; 4828 u16 shift_width; 4829 4830 /* prepare the bits and mask */ 4831 shift_width = ce_info->lsb % 8; 4832 mask = (u8)(BIT(ce_info->width) - 1); 4833 4834 /* shift to correct alignment */ 4835 mask <<= shift_width; 4836 4837 /* get the current bits from the src bit string */ 4838 src = src_ctx + (ce_info->lsb / 8); 4839 4840 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA); 4841 4842 dest_byte &= mask; 4843 4844 dest_byte >>= shift_width; 4845 4846 /* get the address from the struct field */ 4847 target = dest_ctx + ce_info->offset; 4848 4849 /* put it back in the struct */ 4850 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA); 4851 } 4852 4853 /** 4854 * ice_read_word - read context word into struct 4855 * @src_ctx: the context structure to read from 4856 * @dest_ctx: the context to be written to 4857 * @ce_info: a description of the struct to be filled 4858 */ 4859 static void 4860 ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4861 { 4862 u16 dest_word, mask; 4863 u8 *src, *target; 4864 __le16 src_word; 4865 u16 shift_width; 4866 4867 /* prepare the bits and mask */ 4868 shift_width = ce_info->lsb % 8; 4869 mask = BIT(ce_info->width) - 1; 4870 4871 /* shift to correct alignment */ 4872 mask <<= shift_width; 4873 4874 /* get the current bits from the src bit string */ 4875 src = src_ctx + (ce_info->lsb / 8); 4876 4877 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA); 4878 4879 /* the data in the memory is stored as little endian so mask it 4880 * correctly 4881 */ 4882 src_word &= CPU_TO_LE16(mask); 4883 4884 /* get the data back into host order before shifting */ 4885 dest_word = LE16_TO_CPU(src_word); 4886 4887 dest_word >>= shift_width; 4888 4889 /* get the address from the struct field */ 4890 target = dest_ctx + ce_info->offset; 4891 4892 /* put it back in the struct */ 4893 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA); 4894 } 4895 4896 /** 4897 * ice_read_dword - read context dword into struct 4898 * @src_ctx: the context structure to read from 4899 * @dest_ctx: the context to be written to 4900 * @ce_info: a 
description of the struct to be filled 4901 */ 4902 static void 4903 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4904 { 4905 u32 dest_dword, mask; 4906 __le32 src_dword; 4907 u8 *src, *target; 4908 u16 shift_width; 4909 4910 /* prepare the bits and mask */ 4911 shift_width = ce_info->lsb % 8; 4912 4913 /* if the field width is exactly 32 on an x86 machine, then the shift 4914 * operation will not work because the SHL instructions count is masked 4915 * to 5 bits so the shift will do nothing 4916 */ 4917 if (ce_info->width < 32) 4918 mask = BIT(ce_info->width) - 1; 4919 else 4920 mask = (u32)~0; 4921 4922 /* shift to correct alignment */ 4923 mask <<= shift_width; 4924 4925 /* get the current bits from the src bit string */ 4926 src = src_ctx + (ce_info->lsb / 8); 4927 4928 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA); 4929 4930 /* the data in the memory is stored as little endian so mask it 4931 * correctly 4932 */ 4933 src_dword &= CPU_TO_LE32(mask); 4934 4935 /* get the data back into host order before shifting */ 4936 dest_dword = LE32_TO_CPU(src_dword); 4937 4938 dest_dword >>= shift_width; 4939 4940 /* get the address from the struct field */ 4941 target = dest_ctx + ce_info->offset; 4942 4943 /* put it back in the struct */ 4944 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA); 4945 } 4946 4947 /** 4948 * ice_read_qword - read context qword into struct 4949 * @src_ctx: the context structure to read from 4950 * @dest_ctx: the context to be written to 4951 * @ce_info: a description of the struct to be filled 4952 */ 4953 static void 4954 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4955 { 4956 u64 dest_qword, mask; 4957 __le64 src_qword; 4958 u8 *src, *target; 4959 u16 shift_width; 4960 4961 /* prepare the bits and mask */ 4962 shift_width = ce_info->lsb % 8; 4963 4964 /* if the field width is exactly 64 on an x86 machine, then the shift 4965 * operation will not work because the SHL instructions count is masked 4966 * to 6 bits so the shift will do nothing 4967 */ 4968 if (ce_info->width < 64) 4969 mask = BIT_ULL(ce_info->width) - 1; 4970 else 4971 mask = (u64)~0; 4972 4973 /* shift to correct alignment */ 4974 mask <<= shift_width; 4975 4976 /* get the current bits from the src bit string */ 4977 src = src_ctx + (ce_info->lsb / 8); 4978 4979 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA); 4980 4981 /* the data in the memory is stored as little endian so mask it 4982 * correctly 4983 */ 4984 src_qword &= CPU_TO_LE64(mask); 4985 4986 /* get the data back into host order before shifting */ 4987 dest_qword = LE64_TO_CPU(src_qword); 4988 4989 dest_qword >>= shift_width; 4990 4991 /* get the address from the struct field */ 4992 target = dest_ctx + ce_info->offset; 4993 4994 /* put it back in the struct */ 4995 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA); 4996 } 4997 4998 /** 4999 * ice_get_ctx - extract context bits from a packed structure 5000 * @src_ctx: pointer to a generic packed context structure 5001 * @dest_ctx: pointer to a generic non-packed context structure 5002 * @ce_info: a description of the structure to be read from 5003 */ 5004 enum ice_status 5005 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 5006 { 5007 int f; 5008 5009 for (f = 0; ce_info[f].width; f++) { 5010 switch (ce_info[f].size_of) { 5011 case 1: 5012 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]); 5013 break; 5014 case 2: 5015 
ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
5016 break;
5017 case 4:
5018 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
5019 break;
5020 case 8:
5021 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
5022 break;
5023 default:
5024 /* nothing to do, just keep going */
5025 break;
5026 }
5027 }
5028
5029 return ICE_SUCCESS;
5030 }
5031
5032 /**
5033 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
5034 * @hw: pointer to the HW struct
5035 * @vsi_handle: software VSI handle
5036 * @tc: TC number
5037 * @q_handle: software queue handle
5038 */
5039 struct ice_q_ctx *
5040 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
5041 {
5042 struct ice_vsi_ctx *vsi;
5043 struct ice_q_ctx *q_ctx;
5044
5045 vsi = ice_get_vsi_ctx(hw, vsi_handle);
5046 if (!vsi)
5047 return NULL;
5048 if (q_handle >= vsi->num_lan_q_entries[tc])
5049 return NULL;
5050 if (!vsi->lan_q_ctx[tc])
5051 return NULL;
5052 q_ctx = vsi->lan_q_ctx[tc];
5053 return &q_ctx[q_handle];
5054 }
5055
5056 /**
5057 * ice_ena_vsi_txq
5058 * @pi: port information structure
5059 * @vsi_handle: software VSI handle
5060 * @tc: TC number
5061 * @q_handle: software queue handle
5062 * @num_qgrps: Number of added queue groups
5063 * @buf: list of queue groups to be added
5064 * @buf_size: size of buffer for indirect command
5065 * @cd: pointer to command details structure or NULL
5066 *
5067 * This function adds one LAN queue
5068 */
5069 enum ice_status
5070 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
5071 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
5072 struct ice_sq_cd *cd)
5073 {
5074 struct ice_aqc_txsched_elem_data node = { 0 };
5075 struct ice_sched_node *parent;
5076 struct ice_q_ctx *q_ctx;
5077 enum ice_status status;
5078 struct ice_hw *hw;
5079
5080 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5081 return ICE_ERR_CFG;
5082
5083 if (num_qgrps > 1 || buf->num_txqs > 1)
5084 return ICE_ERR_MAX_LIMIT;
5085
5086 hw = pi->hw;
5087
5088 if (!ice_is_vsi_valid(hw, vsi_handle))
5089 return ICE_ERR_PARAM;
5090
5091 ice_acquire_lock(&pi->sched_lock);
5092
5093 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5094 if (!q_ctx) {
5095 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5096 q_handle);
5097 status = ICE_ERR_PARAM;
5098 goto ena_txq_exit;
5099 }
5100
5101 /* find a parent node */
5102 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5103 ICE_SCHED_NODE_OWNER_LAN);
5104 if (!parent) {
5105 status = ICE_ERR_PARAM;
5106 goto ena_txq_exit;
5107 }
5108
5109 buf->parent_teid = parent->info.node_teid;
5110 node.parent_teid = parent->info.node_teid;
5111 /* Mark the values in the "generic" section as valid. The default
5112 * value in the "generic" section is zero. This means that:
5113 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
5114 * - Priority 0 among siblings, indicated by Bits 1-3.
5115 * - WFQ, indicated by Bit 4.
5116 * - Adjustment value 0 is used in the PSM credit update flow, indicated
5117 * by Bits 5-6.
5118 * - Bit 7 is reserved.
5119 * Without setting the generic section as valid in valid_sections, the
5120 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
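 *
 * As a sketch, the default "generic" byte described above:
 *
 *	bit  0   : scheduling mode (0 = BPS)
 *	bits 1-3 : priority among siblings (0)
 *	bit  4   : WFQ (0)
 *	bits 5-6 : PSM credit adjustment value (0)
 *	bit  7   : reserved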
5121 */
5122 buf->txqs[0].info.valid_sections =
5123 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5124 ICE_AQC_ELEM_VALID_EIR;
5125 buf->txqs[0].info.generic = 0;
5126 buf->txqs[0].info.cir_bw.bw_profile_idx =
5127 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5128 buf->txqs[0].info.cir_bw.bw_alloc =
5129 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5130 buf->txqs[0].info.eir_bw.bw_profile_idx =
5131 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5132 buf->txqs[0].info.eir_bw.bw_alloc =
5133 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5134
5135 /* add the LAN queue */
5136 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5137 if (status != ICE_SUCCESS) {
5138 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5139 LE16_TO_CPU(buf->txqs[0].txq_id),
5140 hw->adminq.sq_last_status);
5141 goto ena_txq_exit;
5142 }
5143
5144 node.node_teid = buf->txqs[0].q_teid;
5145 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5146 q_ctx->q_handle = q_handle;
5147 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5148
5149 /* add a leaf node into scheduler tree queue layer */
5150 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
5151 if (!status)
5152 status = ice_sched_replay_q_bw(pi, q_ctx);
5153
5154 ena_txq_exit:
5155 ice_release_lock(&pi->sched_lock);
5156 return status;
5157 }
5158
5159 /**
5160 * ice_dis_vsi_txq
5161 * @pi: port information structure
5162 * @vsi_handle: software VSI handle
5163 * @tc: TC number
5164 * @num_queues: number of queues
5165 * @q_handles: pointer to software queue handle array
5166 * @q_ids: pointer to the q_id array
5167 * @q_teids: pointer to queue node teids
5168 * @rst_src: if called due to reset, specifies the reset source
5169 * @vmvf_num: the relative VM or VF number that is undergoing the reset
5170 * @cd: pointer to command details structure or NULL
5171 *
5172 * This function removes queues and their corresponding nodes in SW DB
5173 */
5174 enum ice_status
5175 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
5176 u16 *q_handles, u16 *q_ids, u32 *q_teids,
5177 enum ice_disq_rst_src rst_src, u16 vmvf_num,
5178 struct ice_sq_cd *cd)
5179 {
5180 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
5181 struct ice_aqc_dis_txq_item *qg_list;
5182 struct ice_q_ctx *q_ctx;
5183 struct ice_hw *hw;
5184 u16 i, buf_size;
5185
5186 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5187 return ICE_ERR_CFG;
5188
5189 hw = pi->hw;
5190
5191 if (!num_queues) {
5192 /* if the queue has already been disabled, yet the disable queue
5193 * command still has to be sent to complete the VF reset, call
5194 * ice_aq_dis_lan_txq without any queue information
5195 */
5196 if (rst_src)
5197 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5198 vmvf_num, NULL);
5199 return ICE_ERR_CFG;
5200 }
5201
5202 buf_size = ice_struct_size(qg_list, q_id, 1);
5203 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5204 if (!qg_list)
5205 return ICE_ERR_NO_MEMORY;
5206
5207 ice_acquire_lock(&pi->sched_lock);
5208
5209 for (i = 0; i < num_queues; i++) {
5210 struct ice_sched_node *node;
5211
5212 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5213 if (!node)
5214 continue;
5215 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5216 if (!q_ctx) {
5217 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
5218 q_handles[i]);
5219 continue;
5220 }
5221 if (q_ctx->q_handle != q_handles[i]) {
5222 ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
5223 q_ctx->q_handle, q_handles[i]);
5224 continue;
5225 }
5226
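		/* reuse the single-entry buffer allocated above; queues
		 * are disabled one per AQ call in this loop
		 */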
qg_list->parent_teid = node->info.parent_teid; 5227 qg_list->num_qs = 1; 5228 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]); 5229 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 5230 vmvf_num, cd); 5231 5232 if (status != ICE_SUCCESS) 5233 break; 5234 ice_free_sched_node(pi, node); 5235 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 5236 } 5237 ice_release_lock(&pi->sched_lock); 5238 ice_free(hw, qg_list); 5239 return status; 5240 } 5241 5242 /** 5243 * ice_cfg_vsi_qs - configure the new/existing VSI queues 5244 * @pi: port information structure 5245 * @vsi_handle: software VSI handle 5246 * @tc_bitmap: TC bitmap 5247 * @maxqs: max queues array per TC 5248 * @owner: LAN or RDMA 5249 * 5250 * This function adds/updates the VSI queues per TC. 5251 */ 5252 static enum ice_status 5253 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 5254 u16 *maxqs, u8 owner) 5255 { 5256 enum ice_status status = ICE_SUCCESS; 5257 u8 i; 5258 5259 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 5260 return ICE_ERR_CFG; 5261 5262 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 5263 return ICE_ERR_PARAM; 5264 5265 ice_acquire_lock(&pi->sched_lock); 5266 5267 ice_for_each_traffic_class(i) { 5268 /* configuration is possible only if TC node is present */ 5269 if (!ice_sched_get_tc_node(pi, i)) 5270 continue; 5271 5272 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 5273 ice_is_tc_ena(tc_bitmap, i)); 5274 if (status) 5275 break; 5276 } 5277 5278 ice_release_lock(&pi->sched_lock); 5279 return status; 5280 } 5281 5282 /** 5283 * ice_cfg_vsi_lan - configure VSI LAN queues 5284 * @pi: port information structure 5285 * @vsi_handle: software VSI handle 5286 * @tc_bitmap: TC bitmap 5287 * @max_lanqs: max LAN queues array per TC 5288 * 5289 * This function adds/updates the VSI LAN queues per TC. 5290 */ 5291 enum ice_status 5292 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 5293 u16 *max_lanqs) 5294 { 5295 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 5296 ICE_SCHED_NODE_OWNER_LAN); 5297 } 5298 5299 /** 5300 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 5301 * @pi: port information structure 5302 * @vsi_handle: software VSI handle 5303 * @tc_bitmap: TC bitmap 5304 * @max_rdmaqs: max RDMA queues array per TC 5305 * 5306 * This function adds/updates the VSI RDMA queues per TC. 
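 *
 * Example (hypothetical caller), enabling one RDMA qset on TC 0 only:
 *
 *	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS] = { 1 };
 *
 *	status = ice_cfg_vsi_rdma(pi, vsi_handle, BIT(0), max_rdmaqs);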
5307 */
5308 enum ice_status
5309 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
5310 u16 *max_rdmaqs)
5311 {
5312 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_rdmaqs,
5313 ICE_SCHED_NODE_OWNER_RDMA);
5314 }
5315
5316 /**
5317 * ice_ena_vsi_rdma_qset
5318 * @pi: port information structure
5319 * @vsi_handle: software VSI handle
5320 * @tc: TC number
5321 * @rdma_qset: pointer to RDMA qset IDs
5322 * @num_qsets: number of RDMA qsets
5323 * @qset_teid: pointer to qset node teids
5324 *
5325 * This function adds RDMA qsets
5326 */
5327 enum ice_status
5328 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
5329 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
5330 {
5331 struct ice_aqc_txsched_elem_data node = { 0 };
5332 struct ice_aqc_add_rdma_qset_data *buf;
5333 struct ice_sched_node *parent;
5334 enum ice_status status;
5335 struct ice_hw *hw;
5336 u16 i, buf_size;
5337
5338 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5339 return ICE_ERR_CFG;
5340 hw = pi->hw;
5341
5342 if (!ice_is_vsi_valid(hw, vsi_handle))
5343 return ICE_ERR_PARAM;
5344
5345 buf_size = ice_struct_size(buf, rdma_qsets, num_qsets);
5346 buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5347 if (!buf)
5348 return ICE_ERR_NO_MEMORY;
5349 ice_acquire_lock(&pi->sched_lock);
5350
5351 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
5352 ICE_SCHED_NODE_OWNER_RDMA);
5353 if (!parent) {
5354 status = ICE_ERR_PARAM;
5355 goto rdma_error_exit;
5356 }
5357 buf->parent_teid = parent->info.node_teid;
5358 node.parent_teid = parent->info.node_teid;
5359
5360 buf->num_qsets = CPU_TO_LE16(num_qsets);
5361 for (i = 0; i < num_qsets; i++) {
5362 buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5363 buf->rdma_qsets[i].info.valid_sections =
5364 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
5365 ICE_AQC_ELEM_VALID_EIR;
5366 buf->rdma_qsets[i].info.generic = 0;
5367 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5368 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5369 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5370 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5371 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5372 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
5373 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5374 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
5375 }
5376 status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5377 if (status != ICE_SUCCESS) {
5378 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5379 goto rdma_error_exit;
5380 }
5381 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
5382 for (i = 0; i < num_qsets; i++) {
5383 node.node_teid = buf->rdma_qsets[i].qset_teid;
5384 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5385 &node, NULL);
5386 if (status)
5387 break;
5388 qset_teid[i] = LE32_TO_CPU(node.node_teid);
5389 }
5390 rdma_error_exit:
5391 ice_release_lock(&pi->sched_lock);
5392 ice_free(hw, buf);
5393 return status;
5394 }
5395
5396 /**
5397 * ice_dis_vsi_rdma_qset - free RDMA resources
5398 * @pi: port_info struct
5399 * @count: number of RDMA qsets to free
5400 * @qset_teid: TEIDs of the qset nodes being freed
5401 * @q_id: list of queue IDs being disabled
5402 */
5403 enum ice_status
5404 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
5405 u16 *q_id)
5406 {
5407 struct ice_aqc_dis_txq_item *qg_list;
5408 enum ice_status status = ICE_SUCCESS;
5409 struct ice_hw *hw;
5410 u16 qg_size;
5411 int i;
5412
5413 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5414 return ICE_ERR_CFG;
5415
5416 hw = pi->hw;
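	/* as in ice_dis_vsi_txq() above, a single-entry disable list is
	 * enough: qset nodes are torn down one per AQ call below
	 */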
/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA qsets to free
 * @qset_teid: TEID of qset node
 * @q_id: list of queue IDs being disabled
 */
enum ice_status
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	qg_size = ice_struct_size(qg_list, q_id, 1);
	qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			CPU_TO_LE16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	ice_release_lock(&pi->sched_lock);
	ice_free(hw, qg_list);
	return status;
}

/**
 * ice_aq_get_sensor_reading
 * @hw: pointer to the HW struct
 * @sensor: sensor type
 * @format: requested response format
 * @data: pointer to data to be read from the sensor
 * @cd: pointer to command details structure or NULL
 *
 * Get sensor reading (0x0632)
 */
enum ice_status
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
			  struct ice_aqc_get_sensor_reading_resp *data,
			  struct ice_sq_cd *cd)
{
	struct ice_aqc_get_sensor_reading *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
	cmd = &desc.params.get_sensor_reading;
	cmd->sensor = sensor;
	cmd->format = format;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status)
		ice_memcpy(data, &desc.params.get_sensor_reading_resp,
			   sizeof(*data), ICE_NONDMA_TO_NONDMA);

	return status;
}
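
/*
 * Example (illustrative sketch): issuing a sensor read. Sensor 0 and
 * format 0 are assumed here to select the default temperature reading;
 * consult the Get Sensor Reading (0x0632) command definition for the
 * values valid on a given device.
 *
 *	struct ice_aqc_get_sensor_reading_resp resp = { 0 };
 *
 *	status = ice_aq_get_sensor_reading(hw, 0, 0, &resp, NULL);
 *	if (!status)
 *		// interpret resp according to the requested sensor/format
 */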
/**
 * ice_is_main_vsi - checks whether the VSI is main VSI
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Checks whether the VSI is the main VSI (the first PF VSI created on
 * given PF).
 */
static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
}

/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 * @sw: pointer to switch info struct for which function initializes filters
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
enum ice_status
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
	enum ice_status status;
	u8 i;

	/* Delete old entries from replay filter list head if there are any */
	ice_rm_sw_replay_rule_info(hw, sw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows adding rule entries back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	status = ice_sched_replay_root_node_bw(hw->port_info);
	if (status)
		return status;

	return ice_sched_replay_tc_node_bw(hw->port_info);
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. This function must be called
 * with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_port_info *pi = hw->port_info;
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (ice_is_main_vsi(hw, vsi_handle)) {
		status = ice_replay_pre_init(hw, sw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
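
/*
 * Example (illustrative sketch): the expected replay sequence after a
 * reset. The main VSI is replayed first, then any other valid VSIs, and
 * ice_replay_post() runs once at the end. The handle iteration bound is
 * hypothetical.
 *
 *	u16 handle;
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	for (handle = 1; !status && handle < num_vsi_handles; handle++)
 *		status = ice_replay_vsi(hw, handle);
 *	ice_replay_post(hw);
 */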
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, so they likely will not be
	 * zeroed when the driver starts. Thus, save the value from the
	 * first read without adding to the statistic value so that we
	 * report stats which count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
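
/*
 * Worked example for the roll-over handling in ice_stat_update40() above
 * (values hypothetical): with a 40-bit counter, prev_stat = 0xFFFFFFFFF0
 * and a new read of 0x10 means the counter wrapped, so the true delta is
 *
 *	(0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20
 *
 * i.e. 16 counts up to the wrap point plus 16 counts after it.
 */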
/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}

/**
 * ice_aq_alternate_write
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be written
 * @reg_val0: value to be written under 'reg_addr0'
 * @reg_addr1: address of second dword to be written
 * @reg_val1: value to be written under 'reg_addr1'
 *
 * Write one or two dwords to alternate structure. Fields are indicated
 * by 'reg_addr0' and 'reg_addr1' register numbers.
 */
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
		       u32 reg_addr1, u32 reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
	cmd->dword0_value = CPU_TO_LE32(reg_val0);
	cmd->dword1_value = CPU_TO_LE32(reg_val1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_aq_alternate_read
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be read
 * @reg_val0: pointer for data read from 'reg_addr0'
 * @reg_addr1: address of second dword to be read
 * @reg_val1: pointer for data read from 'reg_addr1'
 *
 * Read one or two dwords from alternate structure. Fields are indicated
 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
 * is not passed then only register at 'reg_addr0' is read.
 */
enum ice_status
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
		      u32 reg_addr1, u32 *reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	if (!reg_val0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	if (status == ICE_SUCCESS) {
		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);

		if (reg_val1)
			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
	}

	return status;
}
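
/*
 * Example (illustrative sketch): writing a pair of dwords to the alternate
 * structure and reading them back; a subsequent ice_aq_alternate_write_done()
 * would notify FW of the change. Addresses and values are hypothetical.
 *
 *	u32 val0, val1;
 *
 *	status = ice_aq_alternate_write(hw, 0x10, 0xAAAA5555,
 *					0x14, 0x5555AAAA);
 *	if (!status)
 *		status = ice_aq_alternate_read(hw, 0x10, &val0, 0x14, &val1);
 */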
/**
 * ice_aq_alternate_write_done
 * @hw: pointer to the HW structure.
 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
 * @reset_needed: indicates whether SW should trigger a GLOBAL reset
 *
 * Indicates to the FW that alternate structures have been changed.
 */
enum ice_status
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
{
	struct ice_aqc_done_alt_write *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.done_alt_write;

	if (!reset_needed)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
	cmd->flags = bios_mode;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*reset_needed = (LE16_TO_CPU(cmd->flags) &
				 ICE_AQC_RESP_RESET_NEEDED) != 0;

	return status;
}

/**
 * ice_aq_alternate_clear
 * @hw: pointer to the HW structure.
 *
 * Clear the alternate structures of the port from which the function
 * is called.
 */
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to store element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}
/**
 * ice_get_cur_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the current LLDP persistent status.
 */
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	enum ice_status ret;
	__le32 raw_data;
	u32 data, mask;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
			      false, true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

	ice_release_nvm(hw);

	return ret;
}

/**
 * ice_get_dflt_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the default LLDP persistent status.
 */
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	u32 data, mask, loc_data, loc_data_tmp;
	enum ice_status ret;
	__le16 loc_raw_data;
	__le32 raw_data;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	/* Read the offset of EMP_SR_PTR */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
			      &loc_raw_data, false, true, NULL);
	if (ret)
		goto exit;

	loc_data = LE16_TO_CPU(loc_raw_data);
	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
	} else {
		loc_data *= ICE_AQC_NVM_WORD_UNIT;
	}

	/* Read the offset of LLDP configuration pointer */
	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
			      false, true, NULL);
	if (ret)
		goto exit;

	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
	loc_data += loc_data_tmp;

	/* We need to skip LLDP configuration section length (2 bytes) */
	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;

	/* Read the default LLDP configuration */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
			      true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

exit:
	ice_release_nvm(hw);

	return ret;
}
/**
 * ice_aq_read_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
 *	    bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Read I2C (0x06E2)
 */
enum ice_status
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		u16 bus_addr, __le16 addr, u8 params, u8 *data,
		struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	enum ice_status status;
	u8 data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
	cmd = &desc.params.read_write_i2c;

	if (!data)
		return ICE_ERR_PARAM;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (!status) {
		struct ice_aqc_read_i2c_resp *resp;
		u8 i;

		resp = &desc.params.read_i2c_resp;
		for (i = 0; i < data_size; i++) {
			*data = resp->i2c_data[i];
			data++;
		}
	}

	return status;
}

/**
 * ice_aq_write_i2c
 * @hw: pointer to the hw struct
 * @topo_addr: topology address for a device to communicate with
 * @bus_addr: 7-bit I2C bus address
 * @addr: I2C memory address (I2C offset) with up to 16 bits
 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
 * @cd: pointer to command details structure or NULL
 *
 * Write I2C (0x06E3)
 */
enum ice_status
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
		 u16 bus_addr, __le16 addr, u8 params, u8 *data,
		 struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc = { 0 };
	struct ice_aqc_i2c *cmd;
	u8 i, data_size;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
	cmd = &desc.params.read_write_i2c;

	data_size = (params & ICE_AQC_I2C_DATA_SIZE_M) >> ICE_AQC_I2C_DATA_SIZE_S;

	/* data_size limited to 4 */
	if (data_size > 4)
		return ICE_ERR_PARAM;

	cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
	cmd->topo_addr = topo_addr;
	cmd->i2c_params = params;
	cmd->i2c_addr = addr;

	for (i = 0; i < data_size; i++) {
		cmd->i2c_data[i] = *data;
		data++;
	}

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
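
/*
 * Example (illustrative sketch): building the params byte for a 2-byte
 * read at I2C offset 0. Only the data-size field is set here; the other
 * bits are left at their defaults. The bus address 0x50 is the
 * conventional SFP EEPROM address and the zeroed topology address is
 * hypothetical.
 *
 *	struct ice_aqc_link_topo_addr topo_addr = { 0 };
 *	u8 buf[2];
 *	u8 params = (2 << ICE_AQC_I2C_DATA_SIZE_S) & ICE_AQC_I2C_DATA_SIZE_M;
 *
 *	status = ice_aq_read_i2c(hw, topo_addr, 0x50, CPU_TO_LE16(0),
 *				 params, buf, NULL);
 */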
/**
 * ice_aq_set_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be set
 * @value: SW-provided IO value to set in the LSB
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
 */
enum ice_status
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;
	cmd->gpio_val = value ? 1 : 0;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_gpio
 * @hw: pointer to the hw struct
 * @gpio_ctrl_handle: GPIO controller node handle
 * @pin_idx: IO Number of the GPIO that needs to be read
 * @value: IO value read
 * @cd: pointer to command details structure or NULL
 *
 * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
 * the topology
 */
enum ice_status
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
		bool *value, struct ice_sq_cd *cd)
{
	struct ice_aqc_gpio *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
	cmd = &desc.params.read_write_gpio;
	cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
	cmd->gpio_num = pin_idx;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	if (status)
		return status;

	*value = !!cmd->gpio_val;
	return ICE_SUCCESS;
}
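
/*
 * Example (illustrative sketch): driving a topology GPIO high and reading
 * it back. The controller handle and pin index are hypothetical.
 *
 *	bool val;
 *
 *	status = ice_aq_set_gpio(hw, gpio_handle, 2, true, NULL);
 *	if (!status)
 *		status = ice_aq_get_gpio(hw, gpio_handle, 2, &val, NULL);
 */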
/**
 * ice_is_fw_api_min_ver
 * @hw: pointer to the hardware structure
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware API is at least the given version.
 */
static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	} else if (hw->api_maj_ver > maj) {
		return true;
	}

	return false;
}

/**
 * ice_is_fw_min_ver
 * @hw: pointer to the hardware structure
 * @branch: branch version
 * @maj: major version
 * @min: minor version
 * @patch: patch version
 *
 * Checks if the firmware is at least the given version.
 */
static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
			      u8 patch)
{
	if (hw->fw_branch == branch) {
		if (hw->fw_maj_ver > maj)
			return true;
		if (hw->fw_maj_ver == maj) {
			if (hw->fw_min_ver > min)
				return true;
			if (hw->fw_min_ver == min && hw->fw_patch >= patch)
				return true;
		}
	} else if (hw->fw_branch > branch) {
		return true;
	}

	return false;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
				     ICE_FW_API_LINK_OVERRIDE_MIN,
				     ICE_FW_API_LINK_OVERRIDE_PATCH);
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy_type_high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}

/**
 * ice_is_fw_health_report_supported
 * @hw: pointer to the hardware structure
 *
 * Return true if firmware supports health status reports,
 * false otherwise
 */
bool ice_is_fw_health_report_supported(struct ice_hw *hw)
{
	if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
		return true;

	if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
		    hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
			return true;
	}

	return false;
}
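
/*
 * Example (illustrative sketch): testing autoneg from reported PHY caps.
 * The ICE_AQC_REPORT_ACTIVE_CFG report mode is an assumption here; use
 * whichever report mode is appropriate for the query.
 *
 *	struct ice_aqc_get_phy_caps_data caps = { 0 };
 *
 *	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
 *				     &caps, NULL);
 *	if (!status && ice_is_phy_caps_an_enabled(&caps))
 *		// autoneg is enabled for this port
 */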
/**
 * ice_aq_set_health_status_config - Configure FW health events
 * @hw: pointer to the HW struct
 * @event_source: type of diagnostic events to enable
 * @cd: pointer to command details structure or NULL
 *
 * Configure the health status event types that the firmware will send to this
 * PF. The supported event types are: PF-specific, all PFs, and global.
 */
enum ice_status
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
				struct ice_sq_cd *cd)
{
	struct ice_aqc_set_health_status_config *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_health_status_config;

	ice_fill_dflt_direct_cmd_desc(&desc,
				      ice_aqc_opc_set_health_status_config);

	cmd->event_source = event_source;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_get_port_options
 * @hw: pointer to the hw struct
 * @options: buffer for the resultant port options
 * @option_count: input - size of the buffer in port options structures,
 *		  output - number of returned port options
 * @lport: logical port to call the command with (optional)
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; must be true when the PF owns more than one port
 * @active_option_idx: index of active port option in returned buffer
 * @active_option_valid: active option in returned buffer is valid
 * @pending_option_idx: index of pending port option in returned buffer
 * @pending_option_valid: pending option in returned buffer is valid
 *
 * Calls Get Port Options AQC (0x06ea) and verifies result.
 */
enum ice_status
ice_aq_get_port_options(struct ice_hw *hw,
			struct ice_aqc_get_port_options_elem *options,
			u8 *option_count, u8 lport, bool lport_valid,
			u8 *active_option_idx, bool *active_option_valid,
			u8 *pending_option_idx, bool *pending_option_valid)
{
	struct ice_aqc_get_port_options *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 i;

	/* options buffer shall be able to hold max returned options */
	if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
		return ICE_ERR_PARAM;

	cmd = &desc.params.get_port_options;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);

	cmd->lport_num = lport;
	cmd->lport_num_valid = lport_valid;

	status = ice_aq_send_cmd(hw, &desc, options,
				 *option_count * sizeof(*options), NULL);
	if (status != ICE_SUCCESS)
		return status;

	/* verify direct FW response & set output parameters */
	*option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
	ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
	*active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
	if (*active_option_valid) {
		*active_option_idx = cmd->port_options &
			ICE_AQC_PORT_OPT_ACTIVE_M;
		if (*active_option_idx > (*option_count - 1))
			return ICE_ERR_OUT_OF_RANGE;
		ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
			  *active_option_idx);
	}

	*pending_option_valid = cmd->pending_port_option_status &
		ICE_AQC_PENDING_PORT_OPT_VALID;
	if (*pending_option_valid) {
		*pending_option_idx = cmd->pending_port_option_status &
			ICE_AQC_PENDING_PORT_OPT_IDX_M;
		if (*pending_option_idx > (*option_count - 1))
			return ICE_ERR_OUT_OF_RANGE;
		ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
			  *pending_option_idx);
	}

	/* mask output options fields */
	for (i = 0; i < *option_count; i++) {
		options[i].pmd &= ICE_AQC_PORT_OPT_PMD_COUNT_M;
		options[i].max_lane_speed &= ICE_AQC_PORT_OPT_MAX_LANE_M;
		ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
			  options[i].pmd, options[i].max_lane_speed);
	}

	return ICE_SUCCESS;
}
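
/*
 * Example (illustrative sketch): querying port options for the PF-owned
 * port, with a stack buffer sized to hold the maximum number of options
 * the FW can return (matching the *option_count check above).
 *
 *	struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_COUNT_M];
 *	u8 count = ICE_AQC_PORT_OPT_COUNT_M;
 *	u8 active_idx, pending_idx;
 *	bool active_valid, pending_valid;
 *
 *	status = ice_aq_get_port_options(hw, options, &count, 0, false,
 *					 &active_idx, &active_valid,
 *					 &pending_idx, &pending_valid);
 */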
/**
 * ice_aq_set_port_option
 * @hw: pointer to the hw struct
 * @lport: logical port to call the command with
 * @lport_valid: when false, FW uses the port owned by the PF instead of
 *		 lport; must be true when the PF owns more than one port
 * @new_option: new port option to be written
 *
 * Calls Set Port Options AQC (0x06eb).
 */
enum ice_status
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
		       u8 new_option)
{
	struct ice_aqc_set_port_option *cmd;
	struct ice_aq_desc desc;

	if (new_option >= ICE_AQC_PORT_OPT_COUNT_M)
		return ICE_ERR_PARAM;

	cmd = &desc.params.set_port_option;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);

	cmd->lport_num = lport;

	cmd->lport_num_valid = lport_valid;
	cmd->selected_port_option = new_option;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = CPU_TO_LE16(buf_size);

	cmd->type = mib_type;
	cmd->length = CPU_TO_LE16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
		return false;

	return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
				     ICE_FW_API_LLDP_FLTR_MIN,
				     ICE_FW_API_LLDP_FLTR_PATCH);
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = CPU_TO_LE16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
 * @hw: pointer to HW struct
 */
enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_execute_pending_lldp_mib);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
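
/*
 * Example (illustrative sketch): redirecting LLDP frames to a VSI only
 * when the firmware supports the filter-control command.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		status = ice_lldp_fltr_add_remove(hw,
 *						  ice_get_hw_vsi_num(hw, vsi_handle),
 *						  true);
 */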
/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
				     ICE_FW_API_REPORT_DFLT_CFG_MIN,
				     ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}

/* Each index in the following array matches a bit in the link_speed value
 * returned by the AQ, in the range ICE_AQ_LINK_SPEED_10MB ..
 * ICE_AQ_LINK_SPEED_100GB, excluding ICE_AQ_LINK_SPEED_UNKNOWN, which is
 * BIT(15). link_speed is a 16 bit value and the table is indexed by
 * [fls(speed) - 1]; indexes beyond the table yield ICE_LINK_SPEED_UNKNOWN
 * from ice_get_link_speed().
 */
static const u32 ice_aq_to_link_speed[] = {
	ICE_LINK_SPEED_10MBPS,	/* BIT(0) */
	ICE_LINK_SPEED_100MBPS,
	ICE_LINK_SPEED_1000MBPS,
	ICE_LINK_SPEED_2500MBPS,
	ICE_LINK_SPEED_5000MBPS,
	ICE_LINK_SPEED_10000MBPS,
	ICE_LINK_SPEED_20000MBPS,
	ICE_LINK_SPEED_25000MBPS,
	ICE_LINK_SPEED_40000MBPS,
	ICE_LINK_SPEED_50000MBPS,
	ICE_LINK_SPEED_100000MBPS,	/* BIT(10) */
};

/**
 * ice_get_link_speed - get integer speed from table
 * @index: array index from fls(aq speed) - 1
 *
 * Returns: u32 value containing integer speed
 */
u32 ice_get_link_speed(u16 index)
{
	if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
		return ICE_LINK_SPEED_UNKNOWN;

	return ice_aq_to_link_speed[index];
}

/**
 * ice_fw_supports_fec_dis_auto
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports FEC disable in Auto FEC mode
 */
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
	return ice_is_fw_min_ver(hw, ICE_FW_FEC_DIS_AUTO_BRANCH,
				 ICE_FW_FEC_DIS_AUTO_MAJ,
				 ICE_FW_FEC_DIS_AUTO_MIN,
				 ICE_FW_FEC_DIS_AUTO_PATCH);
}

/**
 * ice_is_fw_auto_drop_supported
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports the auto drop feature
 */
bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
{
	/* Compare the (major, minor) pair in order so that a newer major
	 * version qualifies regardless of its minor number.
	 */
	if (hw->api_maj_ver > ICE_FW_API_AUTO_DROP_MAJ)
		return true;
	return hw->api_maj_ver == ICE_FW_API_AUTO_DROP_MAJ &&
	       hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN;
}
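
/*
 * Example (illustrative sketch): converting an AQ-reported link_speed bit
 * to an integer rate, assuming an fls()-style helper is available on the
 * platform (the table comment above uses the same convention).
 *
 *	u16 aq_speed = ICE_AQ_LINK_SPEED_25GB;	// BIT(7)
 *	u32 mbps = ice_get_link_speed(fls(aq_speed) - 1);
 *	// mbps == ICE_LINK_SPEED_25000MBPS
 */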