// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_is_e810
 * @hw: pointer to the hardware structure
 *
 * Returns true if the device is E810 based, false if not.
 */
bool ice_is_e810(struct ice_hw *hw)
{
	return hw->mac_type == ICE_MAC_E810;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user specified buffer. Please interpret the user
 * specified buffer as a "manage_mac_read" response.
 * Responses such as the various MAC addresses are stored in the HW struct
 * (port.mac). ice_discover_dev_caps is expected to be called before this
 * function is called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}

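/* A minimal caller sketch (illustrative, not part of the driver): because a
 * single port can report both a LAN and a WoL address, callers size the
 * response buffer for two entries, exactly as ice_init_hw() does later in
 * this file:
 *
 *	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *			       sizeof(struct ice_aqc_manage_mac_read_resp),
 *			       GFP_KERNEL);
 *	if (mac_buf)
 *		status = ice_aq_manage_mac_read(hw, mac_buf,
 *						2 * sizeof(struct ice_aqc_manage_mac_read_resp),
 *						NULL);
 */
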
/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "	caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "	low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "	eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "	eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "	link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "	module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "	extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "	module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "	link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "	phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "	media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "	link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "	link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "	an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "	ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "	fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "	lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "	max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "	pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}

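/* A minimal caller sketch (illustrative, not part of the driver): callers
 * typically consult pi->phy.get_link_info, which is set elsewhere in the
 * driver when the cached link status may be stale, and refresh it via this
 * AQ command with link status events left disabled:
 *
 *	if (pi->phy.get_link_info) {
 *		status = ice_aq_get_link_info(pi, false, NULL, NULL);
 *		if (status)
 *			return status;
 *	}
 *	// pi->phy.link_info and pi->fc.current_mode are now up to date
 */
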
/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after
 * a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}

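/* A minimal usage sketch (illustrative, not part of the driver), following
 * the rules in the kernel-doc above; "m" is an assumed module index and
 * "flags" an assumed event mask. Enabling Rx CQ logging for one module:
 *
 *	hw->fw_log.cq_en = true;		// before device init
 *	hw->fw_log.evnts[m].cfg = flags;	// module of interest
 *	status = ice_cfg_fw_log(hw, true);
 *
 * Later, while the control queue is still alive (e.g. ahead of a reset),
 * everything is disabled with ice_cfg_fw_log(hw, false).
 */
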
/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}

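/* A packing sketch (illustrative, not part of the driver): each
 * ICE_CTX_STORE() entry above records a field's bit width and its LSB
 * position in the dense buffer, so "qlen, 13, 89" means ice_set_ctx() packs
 * the 13-bit queue length starting at bit 89. A typical caller only fills
 * the sparse struct (the field values below are assumed):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	// assumed ring base, 128-byte units
 *	rlan_ctx.qlen = ring_count;	// assumed descriptor count
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */
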
/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/* Sideband Queue command wrappers */

/**
 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 */
static int
ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
		 void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
						   (struct ice_aq_desc *)desc,
						   buf, buf_size, cd));
}

/**
 * ice_sbq_rw_reg - Fill Sideband Queue command
 * @hw: pointer to the HW struct
 * @in: message info to be filled in descriptor
 */
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
{
	struct ice_sbq_cmd_desc desc = {0};
	struct ice_sbq_msg_req msg = {0};
	u16 msg_len;
	int status;

	msg_len = sizeof(msg);

	msg.dest_dev = in->dest_dev;
	msg.opcode = in->opcode;
	msg.flags = ICE_SBQ_MSG_FLAGS;
	msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
	msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
	msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);

	if (in->opcode)
		msg.data = cpu_to_le32(in->data);
	else
		/* data read comes back in completion, so shorten the struct by
		 * sizeof(msg.data)
		 */
		msg_len -= sizeof(msg.data);

	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
	desc.param0.cmd_len = cpu_to_le16(msg_len);
	status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
	if (!status && !in->opcode)
		in->data = le32_to_cpu
			(((struct ice_sbq_msg_cmpl *)&msg)->data);
	return status;
}

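/* A read sketch (illustrative, not part of the driver): for a sideband read,
 * an opcode of 0 means "read" and the result is copied back out of the
 * completion into in.data; dest_dev and reg_addr below are assumed values:
 *
 *	struct ice_sbq_msg_input in = { 0 };
 *
 *	in.dest_dev = dest;			// assumed destination device
 *	in.opcode = 0;				// read
 *	in.msg_addr_low = lower_16_bits(reg_addr);
 *	in.msg_addr_high = upper_16_bits(reg_addr);
 *	err = ice_sbq_rw_reg(hw, &in);
 *	if (!err)
 *		val = in.data;
 */
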
/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

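/* A handling sketch (illustrative, not part of the driver) for the three
 * Global Config Lock outcomes documented above, as seen from a wrapper such
 * as ice_acquire_global_cfg_lock():
 *
 *	status = ice_aq_req_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *				0, &timeout, NULL);
 *	if (!status)
 *		;	// lock acquired: perform the package download
 *	else if (status == ICE_ERR_AQ_NO_WORK)
 *		;	// another driver already downloaded the package
 *	else
 *		;	// ICE_ERR_AQ_ERROR: did not get lock, fail to load
 */
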
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < flex_array_size(buf, elem, num_entries))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_entries = cpu_to_le16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	memcpy(res, buf->elem, sizeof(*buf->elem) * num);

ice_alloc_res_exit:
	kfree(buf);
	return status;
}

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = struct_size(buf, elem, num);
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = cpu_to_le16(num);
	buf->res_type = cpu_to_le16(type);
	memcpy(buf->elem, res, sizeof(*buf->elem) * num);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	kfree(buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
			 ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
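
/* Illustrative sketch (not part of the driver): a hypothetical caller that
 * grabs a pair of HW resources of a given type via ice_alloc_hw_res() and
 * hands them back with ice_free_hw_res(). The helper name and the idea that
 * the caller programs the returned IDs in between are assumptions for the
 * example.
 */
static enum ice_status __maybe_unused
ice_example_alloc_two_res(struct ice_hw *hw, u16 type)
{
	enum ice_status status;
	u16 res_ids[2];

	/* request two dedicated entries, scanning from the top */
	status = ice_alloc_hw_res(hw, type, 2, false, res_ids);
	if (status)
		return status;

	/* ... program the returned res_ids[] here ... */

	/* return both entries once they are no longer needed */
	return ice_free_hw_res(hw, type, 2, res_ids);
}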

/**
 * ice_parse_common_caps - parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Returns: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		      struct ice_aqc_list_caps_elem *elem, const char *prefix)
{
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);
	bool found = true;

	switch (cap) {
	case ICE_AQC_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
			  caps->valid_functions);
		break;
	case ICE_AQC_CAPS_SRIOV:
		caps->sr_iov_1_1 = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
			  caps->sr_iov_1_1);
		break;
	case ICE_AQC_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
		ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
			  caps->active_tc_bitmap);
		ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
		break;
	case ICE_AQC_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
			  caps->rss_table_size);
		ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
			  caps->rss_table_entry_width);
		break;
	case ICE_AQC_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
			  caps->num_rxq);
		ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
			  caps->rxq_first_id);
		break;
	case ICE_AQC_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
			  caps->num_txq);
		ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
			  caps->txq_first_id);
		break;
	case ICE_AQC_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
			  caps->num_msix_vectors);
		ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
			  caps->msix_vector_first_id);
		break;
	case ICE_AQC_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
		break;
	case ICE_AQC_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
		break;
	case ICE_AQC_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
			  caps->nvm_unified_update);
		break;
	case ICE_AQC_CAPS_RDMA:
		caps->rdma = (number == 1);
		ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
		break;
	case ICE_AQC_CAPS_MAX_MTU:
		caps->max_mtu = number;
		ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
			  prefix, caps->max_mtu);
		break;
	default:
		/* Not one of the recognized common capabilities */
		found = false;
	}

	return found;
}

/**
 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
 * @hw: pointer to the HW structure
 * @caps: pointer to capabilities structure to fix
 *
 * Re-calculate the capabilities that are dependent on the number of physical
 * ports; i.e. some features are not supported or function differently on
 * devices with more than 4 ports.
 */
static void
ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
{
	/* This assumes device capabilities are always scanned before function
	 * capabilities during the initialization flow.
	 */
	if (hw->dev_caps.num_funcs > 4) {
		/* Max 4 TCs per port */
		caps->maxtc = 4;
		ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
			  caps->maxtc);
		if (caps->rdma) {
			ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
			caps->rdma = 0;
		}

		/* print message only when processing device capabilities
		 * during initialization.
		 */
		if (caps == &hw->dev_caps.common_cap)
			dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
	}
}

/**
 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VF.
 */
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 number = le32_to_cpu(cap->number);

	func_p->num_allocd_vfs = number;
	func_p->vf_base_id = logical_id;
	ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
		  func_p->num_allocd_vfs);
	ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
		  func_p->vf_base_id);
}
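
/* Illustrative sketch (not part of the driver): dump the raw fields of one
 * capability record. All list-caps fields are little-endian on the wire,
 * which is why every access above goes through le16_to_cpu()/le32_to_cpu().
 * The helper name is hypothetical.
 */
static void __maybe_unused
ice_example_dump_cap_elem(struct ice_hw *hw,
			  struct ice_aqc_list_caps_elem *elem)
{
	ice_debug(hw, ICE_DBG_INIT, "cap 0x%x: number %u logical %u phys %u\n",
		  le16_to_cpu(elem->cap), le32_to_cpu(elem->number),
		  le32_to_cpu(elem->logical_id), le32_to_cpu(elem->phys_id));
}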

/**
 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_VSI.
 */
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			struct ice_aqc_list_caps_elem *cap)
{
	func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
		  le32_to_cpu(cap->number));
	ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
		  func_p->guar_num_vsi);
}

/**
 * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @cap: pointer to the capability element to parse
 *
 * Extract function capabilities for ICE_AQC_CAPS_1588.
 */
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
			 struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_func_info *info = &func_p->ts_func_info;
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
	func_p->common_cap.ieee_1588 = info->ena;

	info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
	info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
	info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
	info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);

	info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
	info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);

	ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
		  func_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
		  info->src_tmr_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
		  info->tmr_ena);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
		  info->tmr_index_owned);
	ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
		  info->tmr_index_assoc);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
		  info->clk_freq);
	ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
		  info->clk_src);
}

/**
 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 *
 * Extract function capabilities for ICE_AQC_CAPS_FD.
 */
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
	u32 reg_val, val;

	reg_val = rd32(hw, GLQF_FD_SIZE);
	val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
		GLQF_FD_SIZE_FD_GSIZE_S;
	func_p->fd_fltr_guar =
		ice_get_num_per_func(hw, val);
	val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
		GLQF_FD_SIZE_FD_BSIZE_S;
	func_p->fd_fltr_best_effort = val;

	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
		  func_p->fd_fltr_guar);
	ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n",
		  func_p->fd_fltr_best_effort);
}
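
/* Illustrative sketch (not part of the driver): the same read/mask/shift
 * idiom used above, pulled out for a single register field -- the
 * device-wide best-effort flow director pool size. The helper name is
 * hypothetical.
 */
static u32 __maybe_unused ice_example_fd_best_effort_size(struct ice_hw *hw)
{
	u32 reg_val = rd32(hw, GLQF_FD_SIZE);

	/* isolate the field with its mask, then shift it down to bit 0 */
	return (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> GLQF_FD_SIZE_FD_BSIZE_S;
}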

/**
 * ice_parse_func_caps - Parse function capabilities
 * @hw: pointer to the HW struct
 * @func_p: pointer to function capabilities structure
 * @buf: buffer containing the function capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse function (0x000A) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the function capabilities structure.
 */
static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
		    void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(func_p, 0, sizeof(*func_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &func_p->common_cap,
					      &cap_resp[i], "func caps");

		switch (cap) {
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_func_caps(hw, func_p);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &func_p->common_cap);
}

/**
 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
 */
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_funcs = hweight32(number);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
		  dev_p->num_funcs);
}

/**
 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VF for device capabilities.
 */
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		      struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_vfs_exposed = number;
	ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
		  dev_p->num_vfs_exposed);
}

/**
 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_VSI for device capabilities.
 */
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		       struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_vsi_allocd_to_host = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
		  dev_p->num_vsi_allocd_to_host);
}

/**
 * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_1588 for device capabilities.
 */
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
	u32 logical_id = le32_to_cpu(cap->logical_id);
	u32 phys_id = le32_to_cpu(cap->phys_id);
	u32 number = le32_to_cpu(cap->number);

	info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
	dev_p->common_cap.ieee_1588 = info->ena;

	info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
	info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
	info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);

	info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
	info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
	info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);

	info->ena_ports = logical_id;
	info->tmr_own_map = phys_id;

	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
		  dev_p->common_cap.ieee_1588);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
		  info->tmr0_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
		  info->tmr0_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
		  info->tmr0_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
		  info->tmr1_owner);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
		  info->tmr1_owned);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
		  info->tmr1_ena);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
		  info->ena_ports);
	ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
		  info->tmr_own_map);
}

/**
 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @cap: capability element to parse
 *
 * Parse ICE_AQC_CAPS_FD for device capabilities.
 */
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
			struct ice_aqc_list_caps_elem *cap)
{
	u32 number = le32_to_cpu(cap->number);

	dev_p->num_flow_director_fltr = number;
	ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
		  dev_p->num_flow_director_fltr);
}
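
/* Illustrative sketch (not part of the driver): after device capabilities
 * have been parsed into hw->dev_caps, decide whether this device instance
 * owns and may program PTP timer 0. The helper name is hypothetical.
 */
static bool __maybe_unused ice_example_owns_tmr0(struct ice_hw *hw)
{
	struct ice_ts_dev_info *info = &hw->dev_caps.ts_dev_info;

	return info->ena && info->tmr0_owned && info->tmr0_ena;
}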

/**
 * ice_parse_dev_caps - Parse device capabilities
 * @hw: pointer to the HW struct
 * @dev_p: pointer to device capabilities structure
 * @buf: buffer containing the device capability records
 * @cap_count: the number of capabilities
 *
 * Helper function to parse device (0x000B) capabilities list. For
 * capabilities shared between device and function, this relies on
 * ice_parse_common_caps.
 *
 * Loop through the list of provided capabilities and extract the relevant
 * data into the device capabilities structure.
 */
static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
		   void *buf, u32 cap_count)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	u32 i;

	cap_resp = buf;

	memset(dev_p, 0, sizeof(*dev_p));

	for (i = 0; i < cap_count; i++) {
		u16 cap = le16_to_cpu(cap_resp[i].cap);
		bool found;

		found = ice_parse_common_caps(hw, &dev_p->common_cap,
					      &cap_resp[i], "dev caps");

		switch (cap) {
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VF:
			ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_VSI:
			ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_1588:
			ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		case ICE_AQC_CAPS_FD:
			ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
			break;
		default:
			/* Don't list common capabilities as unknown */
			if (!found)
				ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
					  i, cap);
			break;
		}
	}

	ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
}

/**
 * ice_aq_list_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a buffer to hold the capabilities
 * @buf_size: size of the buffer
 * @cap_count: if not NULL, set to the number of capabilities reported
 * @opc: capabilities type to discover, device or function
 * @cd: pointer to command details structure or NULL
 *
 * Get the function (0x000A) or device (0x000B) capabilities description from
 * firmware and store it in the buffer.
 *
 * If the cap_count pointer is not NULL, then it is set to the number of
 * capabilities firmware will report. Note that if the buffer size is too
 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
 * cap_count will still be updated in this case. It is recommended that the
 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
 * firmware could return) to avoid this.
 */
enum ice_status
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (cap_count)
		*cap_count = le32_to_cpu(cmd->count);

	return status;
}
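
/* Illustrative sketch (not part of the driver): probe how many capability
 * records firmware would return by sending a deliberately undersized
 * (single element) buffer. Per the note above, cap_count is still updated
 * even when firmware answers with a buffer-too-small error. The helper
 * name is hypothetical.
 */
static u32 __maybe_unused ice_example_probe_cap_count(struct ice_hw *hw)
{
	struct ice_aqc_list_caps_elem elem;
	u32 cap_count = 0;

	/* status intentionally ignored; only the reported count matters */
	ice_aq_list_caps(hw, &elem, sizeof(elem), &cap_count,
			 ice_aqc_opc_list_dev_caps, NULL);

	return cap_count;
}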

/**
 * ice_discover_dev_caps - Read and extract device capabilities
 * @hw: pointer to the hardware structure
 * @dev_caps: pointer to device capabilities structure
 *
 * Read the device capabilities and extract them into the dev_caps structure
 * for later use.
 */
enum ice_status
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_dev_caps, NULL);
	if (!status)
		ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_discover_func_caps - Read and extract function capabilities
 * @hw: pointer to the hardware structure
 * @func_caps: pointer to function capabilities structure
 *
 * Read the function capabilities and extract them into the func_caps structure
 * for later use.
 */
static enum ice_status
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
	enum ice_status status;
	u32 cap_count = 0;
	void *cbuf;

	cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
	if (!cbuf)
		return ICE_ERR_NO_MEMORY;

	/* Although the driver doesn't know the number of capabilities the
	 * device will return, we can simply send a 4KB buffer, the maximum
	 * possible size that firmware can return.
	 */
	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);

	status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
				  ice_aqc_opc_list_func_caps, NULL);
	if (!status)
		ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
	kfree(cbuf);

	return status;
}

/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	struct ice_hw_common_caps cached_caps;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	cached_caps = func_caps->common_cap;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

#define ICE_RESTORE_FUNC_CAP(name) \
	func_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_FUNC_CAP(valid_functions);
	ICE_RESTORE_FUNC_CAP(txq_first_id);
	ICE_RESTORE_FUNC_CAP(rxq_first_id);
	ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
	ICE_RESTORE_FUNC_CAP(max_mtu);
	ICE_RESTORE_FUNC_CAP(nvm_unified_update);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom);
	ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist);

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	cached_caps = dev_caps->common_cap;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

#define ICE_RESTORE_DEV_CAP(name) \
	dev_caps->common_cap.name = cached_caps.name

	/* restore cached values */
	ICE_RESTORE_DEV_CAP(valid_functions);
	ICE_RESTORE_DEV_CAP(txq_first_id);
	ICE_RESTORE_DEV_CAP(rxq_first_id);
	ICE_RESTORE_DEV_CAP(msix_vector_first_id);
	ICE_RESTORE_DEV_CAP(max_mtu);
	ICE_RESTORE_DEV_CAP(nvm_unified_update);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_orom);
	ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist);
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_dev_caps(hw, &hw->dev_caps);
	if (status)
		return status;

	return ice_discover_func_caps(hw, &hw->func_caps);
}

/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ether_addr_copy(cmd->mac_addr, mac_addr);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}
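
/* Illustrative sketch (not part of the driver): persist a locally
 * administered address via ice_aq_manage_mac_write() above. The flag name
 * is assumed to come from ice_adminq_cmd.h; the helper itself is
 * hypothetical.
 */
static enum ice_status __maybe_unused
ice_example_store_laa(struct ice_hw *hw, const u8 *new_mac)
{
	/* request an LAA + WoL address update in one command */
	return ice_aq_manage_mac_write(hw, new_mac,
				       ICE_AQC_MAN_MAC_UPDATE_LAA_WOL, NULL);
}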

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function will convert one PHY type to its
 * speed.
 * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
 * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure represents a
 * certain link speed. This helper function will turn on bits in
 * [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
	ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
		  cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
	if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
		status = 0;

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
				     GFP_KERNEL);
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
					     pcaps, NULL);

		devm_kfree(ice_hw_to_dev(hw), pcaps);
	}

	return status;
}
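
/* Illustrative sketch (not part of the driver): refresh the cached link
 * state for a port and report whether media is currently present. The
 * helper name is hypothetical.
 */
static bool __maybe_unused ice_example_media_present(struct ice_port_info *pi)
{
	if (ice_update_link_info(pi))
		return false;

	return !!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE);
}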

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

/**
 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FC mode
 * @req_mode: FC mode to configure
 */
enum ice_status
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
	       enum ice_fc_mode req_mode)
{
	struct ice_phy_cache_mode_data cache_data;
	u8 pause_mask = 0x0;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	switch (req_mode) {
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* clear the old pause settings */
	cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		       ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg->caps |= pause_mask;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	return 0;
}
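
/* Illustrative sketch (not part of the driver): request symmetric flow
 * control on a port by editing a PHY config the caller has already built
 * from the current capabilities, then applying it. The helper name is
 * hypothetical and error handling is trimmed for brevity.
 */
static enum ice_status __maybe_unused
ice_example_force_full_fc(struct ice_port_info *pi,
			  struct ice_aqc_set_phy_cfg_data *cfg)
{
	enum ice_status status;

	/* flip the pause bits in cfg and cache the user request */
	status = ice_cfg_phy_fc(pi, cfg, ICE_FC_FULL);
	if (status)
		return status;

	/* apply the updated configuration to the PHY */
	return ice_aq_set_phy_cfg(pi->hw, pi, cfg, NULL);
}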

/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_BAD_PTR;

	*aq_failures = 0;
	hw = pi->hw;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	/* Get the current PHY config */
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
				     pcaps, NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* Configure the set PHY data */
	status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
	if (status)
		goto out;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (!status)
				break;

			mdelay(100);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	return status;
}

/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match the PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_GET_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false,
				     (ice_fw_supports_report_dflt_cfg(hw) ?
				      ICE_AQC_REPORT_DFLT_CFG :
				      ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
	if (status)
		goto out;

	cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
	cfg->link_fec_opt = pcaps->link_fec_options;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
	    !ice_fw_supports_report_dflt_cfg(hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	kfree(pcaps);

	return status;
}

/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = 0;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}
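
/* Illustrative sketch (not part of the driver): a one-shot link check,
 * which (per the comment above) also enables link status reporting as a
 * side effect of the query. The helper name is hypothetical.
 */
static bool __maybe_unused ice_example_link_is_up(struct ice_port_info *pi)
{
	bool link_up = false;

	if (ice_get_link_status(pi, &link_up))
		return false;

	return link_up;
}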

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = cpu_to_le16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
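
/* Illustrative sketch (not part of the driver): blink the port
 * identification LED briefly, then restore its original (netlist)
 * behavior. The helper name and the delay period are assumptions
 * for the example.
 */
static void __maybe_unused ice_example_blink_port_led(struct ice_port_info *pi)
{
	if (ice_aq_set_port_id_led(pi, false, NULL))
		return;

	mdelay(2000);	/* leave the LED blinking for a moment */

	ice_aq_set_port_id_led(pi, true, NULL);
}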

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. Lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 for read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || (mem_addr & 0xff00))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff);
	cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @params: RSS LUT parameters
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS look up table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
	u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u8 *lut;

	if (!params)
		return ICE_ERR_PARAM;

	vsi_handle = params->vsi_handle;
	lut = params->lut;

	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	lut_size = params->lut_size;
	lut_type = params->lut_type;
	glob_lut_idx = params->global_lut_id;
	vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 3546 break; 3547 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 3548 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 3549 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3550 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3551 break; 3552 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 3553 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3554 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 3555 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3556 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3557 break; 3558 } 3559 fallthrough; 3560 default: 3561 status = ICE_ERR_PARAM; 3562 goto ice_aq_get_set_rss_lut_exit; 3563 } 3564 3565 ice_aq_get_set_rss_lut_send: 3566 cmd_resp->flags = cpu_to_le16(flags); 3567 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 3568 3569 ice_aq_get_set_rss_lut_exit: 3570 return status; 3571 } 3572 3573 /** 3574 * ice_aq_get_rss_lut 3575 * @hw: pointer to the hardware structure 3576 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 3577 * 3578 * get the RSS lookup table, PF or VSI type 3579 */ 3580 enum ice_status 3581 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 3582 { 3583 return __ice_aq_get_set_rss_lut(hw, get_params, false); 3584 } 3585 3586 /** 3587 * ice_aq_set_rss_lut 3588 * @hw: pointer to the hardware structure 3589 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 3590 * 3591 * set the RSS lookup table, PF or VSI type 3592 */ 3593 enum ice_status 3594 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 3595 { 3596 return __ice_aq_get_set_rss_lut(hw, set_params, true); 3597 } 3598 3599 /** 3600 * __ice_aq_get_set_rss_key 3601 * @hw: pointer to the HW struct 3602 * @vsi_id: VSI FW index 3603 * @key: pointer to key info struct 3604 * @set: set true to set the key, false to get the key 3605 * 3606 * get (0x0B04) or set (0x0B02) the RSS key per VSI 3607 */ 3608 static enum 3609 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 3610 struct ice_aqc_get_set_rss_keys *key, 3611 bool set) 3612 { 3613 struct ice_aqc_get_set_rss_key *cmd_resp; 3614 u16 key_size = sizeof(*key); 3615 struct ice_aq_desc desc; 3616 3617 cmd_resp = &desc.params.get_set_rss_key; 3618 3619 if (set) { 3620 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 3621 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3622 } else { 3623 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 3624 } 3625 3626 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3627 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 3628 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 3629 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 3630 3631 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 3632 } 3633 3634 /** 3635 * ice_aq_get_rss_key 3636 * @hw: pointer to the HW struct 3637 * @vsi_handle: software VSI handle 3638 * @key: pointer to key info struct 3639 * 3640 * get the RSS key per VSI 3641 */ 3642 enum ice_status 3643 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 3644 struct ice_aqc_get_set_rss_keys *key) 3645 { 3646 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 3647 return ICE_ERR_PARAM; 3648 3649 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3650 key, false); 3651 } 3652 3653 /** 3654 * ice_aq_set_rss_key 3655 * @hw: pointer to the HW struct 3656 * @vsi_handle: software VSI handle 3657 * @keys: pointer to key info struct 3658 * 3659 * set the RSS key per VSI 3660 */ 3661 enum ice_status 3662 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 3663 struct 
ice_aqc_get_set_rss_keys *keys) 3664 { 3665 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 3666 return ICE_ERR_PARAM; 3667 3668 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3669 keys, true); 3670 } 3671 3672 /** 3673 * ice_aq_add_lan_txq 3674 * @hw: pointer to the hardware structure 3675 * @num_qgrps: Number of added queue groups 3676 * @qg_list: list of queue groups to be added 3677 * @buf_size: size of buffer for indirect command 3678 * @cd: pointer to command details structure or NULL 3679 * 3680 * Add Tx LAN queue (0x0C30) 3681 * 3682 * NOTE: 3683 * Prior to calling add Tx LAN queue, initialize the following as part of 3684 * the Tx queue context: the completion queue ID (if the queue uses a 3685 * completion queue), the quanta profile, the cache profile, and the packet 3686 * shaper profile. 3687 * 3688 * After the add Tx LAN queue AQ command completes, interrupts should be 3689 * associated with the specific queues; associating a Tx queue with a 3690 * doorbell queue is not part of the add Tx LAN queue 3691 * flow. 3692 */ 3693 static enum ice_status 3694 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 3695 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 3696 struct ice_sq_cd *cd) 3697 { 3698 struct ice_aqc_add_tx_qgrp *list; 3699 struct ice_aqc_add_txqs *cmd; 3700 struct ice_aq_desc desc; 3701 u16 i, sum_size = 0; 3702 3703 cmd = &desc.params.add_txqs; 3704 3705 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 3706 3707 if (!qg_list) 3708 return ICE_ERR_PARAM; 3709 3710 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 3711 return ICE_ERR_PARAM; 3712 3713 for (i = 0, list = qg_list; i < num_qgrps; i++) { 3714 sum_size += struct_size(list, txqs, list->num_txqs); 3715 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 3716 list->num_txqs); 3717 } 3718 3719 if (buf_size != sum_size) 3720 return ICE_ERR_PARAM; 3721 3722 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3723 3724 cmd->num_qgrps = num_qgrps; 3725 3726 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 3727 } 3728 3729 /** 3730 * ice_aq_dis_lan_txq 3731 * @hw: pointer to the hardware structure 3732 * @num_qgrps: number of groups in the list 3733 * @qg_list: the list of groups to disable 3734 * @buf_size: the total size of the qg_list buffer in bytes 3735 * @rst_src: if called due to reset, specifies the reset source 3736 * @vmvf_num: the relative VM or VF number that is undergoing the reset 3737 * @cd: pointer to command details structure or NULL 3738 * 3739 * Disable LAN Tx queue (0x0C31) 3740 */ 3741 static enum ice_status 3742 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 3743 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 3744 enum ice_disq_rst_src rst_src, u16 vmvf_num, 3745 struct ice_sq_cd *cd) 3746 { 3747 struct ice_aqc_dis_txq_item *item; 3748 struct ice_aqc_dis_txqs *cmd; 3749 struct ice_aq_desc desc; 3750 enum ice_status status; 3751 u16 i, sz = 0; 3752 3753 cmd = &desc.params.dis_txqs; 3754 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 3755 3756 /* qg_list can be NULL only in VM/VF reset flow */ 3757 if (!qg_list && !rst_src) 3758 return ICE_ERR_PARAM; 3759 3760 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 3761 return ICE_ERR_PARAM; 3762 3763 cmd->num_entries = num_qgrps; 3764 3765 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 3766 ICE_AQC_Q_DIS_TIMEOUT_M); 3767 3768 switch (rst_src) { 3769 case ICE_VM_RESET: 3770 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 3771 cmd->vmvf_and_timeout |= 3772 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 3773 break; 3774 case 
ICE_VF_RESET: 3775 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET; 3776 /* In this case, FW expects vmvf_num to be absolute VF ID */ 3777 cmd->vmvf_and_timeout |= 3778 cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) & 3779 ICE_AQC_Q_DIS_VMVF_NUM_M); 3780 break; 3781 case ICE_NO_RESET: 3782 default: 3783 break; 3784 } 3785 3786 /* flush pipe on time out */ 3787 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE; 3788 /* If no queue group info, we are in a reset flow. Issue the AQ */ 3789 if (!qg_list) 3790 goto do_aq; 3791 3792 /* set RD bit to indicate that command buffer is provided by the driver 3793 * and it needs to be read by the firmware 3794 */ 3795 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3796 3797 for (i = 0, item = qg_list; i < num_qgrps; i++) { 3798 u16 item_size = struct_size(item, q_id, item->num_qs); 3799 3800 /* If the num of queues is even, add 2 bytes of padding */ 3801 if ((item->num_qs % 2) == 0) 3802 item_size += 2; 3803 3804 sz += item_size; 3805 3806 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); 3807 } 3808 3809 if (buf_size != sz) 3810 return ICE_ERR_PARAM; 3811 3812 do_aq: 3813 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 3814 if (status) { 3815 if (!qg_list) 3816 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", 3817 vmvf_num, hw->adminq.sq_last_status); 3818 else 3819 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n", 3820 le16_to_cpu(qg_list[0].q_id[0]), 3821 hw->adminq.sq_last_status); 3822 } 3823 return status; 3824 } 3825 3826 /** 3827 * ice_aq_add_rdma_qsets 3828 * @hw: pointer to the hardware structure 3829 * @num_qset_grps: Number of RDMA Qset groups 3830 * @qset_list: list of Qset groups to be added 3831 * @buf_size: size of buffer for indirect command 3832 * @cd: pointer to command details structure or NULL 3833 * 3834 * Add Tx RDMA Qsets (0x0C33) 3835 */ 3836 static int 3837 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, 3838 struct ice_aqc_add_rdma_qset_data *qset_list, 3839 u16 buf_size, struct ice_sq_cd *cd) 3840 { 3841 struct ice_aqc_add_rdma_qset_data *list; 3842 struct ice_aqc_add_rdma_qset *cmd; 3843 struct ice_aq_desc desc; 3844 u16 i, sum_size = 0; 3845 3846 cmd = &desc.params.add_rdma_qset; 3847 3848 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset); 3849 3850 if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS) 3851 return -EINVAL; 3852 3853 for (i = 0, list = qset_list; i < num_qset_grps; i++) { 3854 u16 num_qsets = le16_to_cpu(list->num_qsets); 3855 3856 sum_size += struct_size(list, rdma_qsets, num_qsets); 3857 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets + 3858 num_qsets); 3859 } 3860 3861 if (buf_size != sum_size) 3862 return -EINVAL; 3863 3864 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3865 3866 cmd->num_qset_grps = num_qset_grps; 3867 3868 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list, 3869 buf_size, cd)); 3870 } 3871 3872 /* End of FW Admin Queue command wrappers */ 3873 3874 /** 3875 * ice_write_byte - write a byte to a packed context structure 3876 * @src_ctx: the context structure to read from 3877 * @dest_ctx: the context to be written to 3878 * @ce_info: a description of the struct to be filled 3879 */ 3880 static void 3881 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3882 { 3883 u8 src_byte, dest_byte, mask; 3884 u8 *from, *dest; 3885 u16 shift_width; 3886 3887 /* copy from the next struct field */ 3888 from = src_ctx + ce_info->offset; 3889 3890 /* prepare the bits and mask */ 3891 shift_width = 
ce_info->lsb % 8; 3892 mask = (u8)(BIT(ce_info->width) - 1); 3893 3894 src_byte = *from; 3895 src_byte &= mask; 3896 3897 /* shift to correct alignment */ 3898 mask <<= shift_width; 3899 src_byte <<= shift_width; 3900 3901 /* get the current bits from the target bit string */ 3902 dest = dest_ctx + (ce_info->lsb / 8); 3903 3904 memcpy(&dest_byte, dest, sizeof(dest_byte)); 3905 3906 dest_byte &= ~mask; /* get the bits not changing */ 3907 dest_byte |= src_byte; /* add in the new bits */ 3908 3909 /* put it all back */ 3910 memcpy(dest, &dest_byte, sizeof(dest_byte)); 3911 } 3912 3913 /** 3914 * ice_write_word - write a word to a packed context structure 3915 * @src_ctx: the context structure to read from 3916 * @dest_ctx: the context to be written to 3917 * @ce_info: a description of the struct to be filled 3918 */ 3919 static void 3920 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3921 { 3922 u16 src_word, mask; 3923 __le16 dest_word; 3924 u8 *from, *dest; 3925 u16 shift_width; 3926 3927 /* copy from the next struct field */ 3928 from = src_ctx + ce_info->offset; 3929 3930 /* prepare the bits and mask */ 3931 shift_width = ce_info->lsb % 8; 3932 mask = BIT(ce_info->width) - 1; 3933 3934 /* don't swizzle the bits until after the mask because the mask bits 3935 * will be in a different bit position on big endian machines 3936 */ 3937 src_word = *(u16 *)from; 3938 src_word &= mask; 3939 3940 /* shift to correct alignment */ 3941 mask <<= shift_width; 3942 src_word <<= shift_width; 3943 3944 /* get the current bits from the target bit string */ 3945 dest = dest_ctx + (ce_info->lsb / 8); 3946 3947 memcpy(&dest_word, dest, sizeof(dest_word)); 3948 3949 dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ 3950 dest_word |= cpu_to_le16(src_word); /* add in the new bits */ 3951 3952 /* put it all back */ 3953 memcpy(dest, &dest_word, sizeof(dest_word)); 3954 } 3955 3956 /** 3957 * ice_write_dword - write a dword to a packed context structure 3958 * @src_ctx: the context structure to read from 3959 * @dest_ctx: the context to be written to 3960 * @ce_info: a description of the struct to be filled 3961 */ 3962 static void 3963 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 3964 { 3965 u32 src_dword, mask; 3966 __le32 dest_dword; 3967 u8 *from, *dest; 3968 u16 shift_width; 3969 3970 /* copy from the next struct field */ 3971 from = src_ctx + ce_info->offset; 3972 3973 /* prepare the bits and mask */ 3974 shift_width = ce_info->lsb % 8; 3975 3976 /* if the field width is exactly 32 on an x86 machine, then the shift 3977 * operation will not work because the SHL instructions count is masked 3978 * to 5 bits so the shift will do nothing 3979 */ 3980 if (ce_info->width < 32) 3981 mask = BIT(ce_info->width) - 1; 3982 else 3983 mask = (u32)~0; 3984 3985 /* don't swizzle the bits until after the mask because the mask bits 3986 * will be in a different bit position on big endian machines 3987 */ 3988 src_dword = *(u32 *)from; 3989 src_dword &= mask; 3990 3991 /* shift to correct alignment */ 3992 mask <<= shift_width; 3993 src_dword <<= shift_width; 3994 3995 /* get the current bits from the target bit string */ 3996 dest = dest_ctx + (ce_info->lsb / 8); 3997 3998 memcpy(&dest_dword, dest, sizeof(dest_dword)); 3999 4000 dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ 4001 dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ 4002 4003 /* put it all back */ 4004 memcpy(dest, &dest_dword, 
sizeof(dest_dword)); 4005 } 4006 4007 /** 4008 * ice_write_qword - write a qword to a packed context structure 4009 * @src_ctx: the context structure to read from 4010 * @dest_ctx: the context to be written to 4011 * @ce_info: a description of the struct to be filled 4012 */ 4013 static void 4014 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) 4015 { 4016 u64 src_qword, mask; 4017 __le64 dest_qword; 4018 u8 *from, *dest; 4019 u16 shift_width; 4020 4021 /* copy from the next struct field */ 4022 from = src_ctx + ce_info->offset; 4023 4024 /* prepare the bits and mask */ 4025 shift_width = ce_info->lsb % 8; 4026 4027 /* if the field width is exactly 64 on an x86 machine, then the shift 4028 * operation will not work because the SHL instructions count is masked 4029 * to 6 bits so the shift will do nothing 4030 */ 4031 if (ce_info->width < 64) 4032 mask = BIT_ULL(ce_info->width) - 1; 4033 else 4034 mask = (u64)~0; 4035 4036 /* don't swizzle the bits until after the mask because the mask bits 4037 * will be in a different bit position on big endian machines 4038 */ 4039 src_qword = *(u64 *)from; 4040 src_qword &= mask; 4041 4042 /* shift to correct alignment */ 4043 mask <<= shift_width; 4044 src_qword <<= shift_width; 4045 4046 /* get the current bits from the target bit string */ 4047 dest = dest_ctx + (ce_info->lsb / 8); 4048 4049 memcpy(&dest_qword, dest, sizeof(dest_qword)); 4050 4051 dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ 4052 dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ 4053 4054 /* put it all back */ 4055 memcpy(dest, &dest_qword, sizeof(dest_qword)); 4056 } 4057 4058 /** 4059 * ice_set_ctx - set context bits in packed structure 4060 * @hw: pointer to the hardware structure 4061 * @src_ctx: pointer to a generic non-packed context structure 4062 * @dest_ctx: pointer to memory for the packed structure 4063 * @ce_info: a description of the structure to be transformed 4064 */ 4065 enum ice_status 4066 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, 4067 const struct ice_ctx_ele *ce_info) 4068 { 4069 int f; 4070 4071 for (f = 0; ce_info[f].width; f++) { 4072 /* We have to deal with each element of the FW response 4073 * using the correct size so that we are correct regardless 4074 * of the endianness of the machine. 4075 */ 4076 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { 4077 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", 4078 f, ce_info[f].width, ce_info[f].size_of); 4079 continue; 4080 } 4081 switch (ce_info[f].size_of) { 4082 case sizeof(u8): 4083 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); 4084 break; 4085 case sizeof(u16): 4086 ice_write_word(src_ctx, dest_ctx, &ce_info[f]); 4087 break; 4088 case sizeof(u32): 4089 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); 4090 break; 4091 case sizeof(u64): 4092 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); 4093 break; 4094 default: 4095 return ICE_ERR_INVAL_SIZE; 4096 } 4097 } 4098 4099 return 0; 4100 } 4101 4102 /** 4103 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC 4104 * @hw: pointer to the HW struct 4105 * @vsi_handle: software VSI handle 4106 * @tc: TC number 4107 * @q_handle: software queue handle 4108 */ 4109 struct ice_q_ctx * 4110 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle) 4111 { 4112 struct ice_vsi_ctx *vsi; 4113 struct ice_q_ctx *q_ctx; 4114 4115 vsi = ice_get_vsi_ctx(hw, vsi_handle); 4116 if (!vsi) 4117 return NULL; 4118 if (q_handle >= vsi->num_lan_q_entries[tc]) 4119 return NULL; 4120 if (!vsi->lan_q_ctx[tc]) 4121 return NULL; 4122 q_ctx = vsi->lan_q_ctx[tc]; 4123 return &q_ctx[q_handle]; 4124 } 4125 4126 /** 4127 * ice_ena_vsi_txq 4128 * @pi: port information structure 4129 * @vsi_handle: software VSI handle 4130 * @tc: TC number 4131 * @q_handle: software queue handle 4132 * @num_qgrps: Number of added queue groups 4133 * @buf: list of queue groups to be added 4134 * @buf_size: size of buffer for indirect command 4135 * @cd: pointer to command details structure or NULL 4136 * 4137 * This function adds one LAN queue 4138 */ 4139 enum ice_status 4140 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, 4141 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, 4142 struct ice_sq_cd *cd) 4143 { 4144 struct ice_aqc_txsched_elem_data node = { 0 }; 4145 struct ice_sched_node *parent; 4146 struct ice_q_ctx *q_ctx; 4147 enum ice_status status; 4148 struct ice_hw *hw; 4149 4150 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4151 return ICE_ERR_CFG; 4152 4153 if (num_qgrps > 1 || buf->num_txqs > 1) 4154 return ICE_ERR_MAX_LIMIT; 4155 4156 hw = pi->hw; 4157 4158 if (!ice_is_vsi_valid(hw, vsi_handle)) 4159 return ICE_ERR_PARAM; 4160 4161 mutex_lock(&pi->sched_lock); 4162 4163 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle); 4164 if (!q_ctx) { 4165 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n", 4166 q_handle); 4167 status = ICE_ERR_PARAM; 4168 goto ena_txq_exit; 4169 } 4170 4171 /* find a parent node */ 4172 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4173 ICE_SCHED_NODE_OWNER_LAN); 4174 if (!parent) { 4175 status = ICE_ERR_PARAM; 4176 goto ena_txq_exit; 4177 } 4178 4179 buf->parent_teid = parent->info.node_teid; 4180 node.parent_teid = parent->info.node_teid; 4181 /* Mark that the values in the "generic" section as valid. The default 4182 * value in the "generic" section is zero. This means that : 4183 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0. 4184 * - 0 priority among siblings, indicated by Bit 1-3. 4185 * - WFQ, indicated by Bit 4. 4186 * - 0 Adjustment value is used in PSM credit update flow, indicated by 4187 * Bit 5-6. 4188 * - Bit 7 is reserved. 4189 * Without setting the generic section as valid in valid_sections, the 4190 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
4191 */ 4192 buf->txqs[0].info.valid_sections = 4193 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4194 ICE_AQC_ELEM_VALID_EIR; 4195 buf->txqs[0].info.generic = 0; 4196 buf->txqs[0].info.cir_bw.bw_profile_idx = 4197 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4198 buf->txqs[0].info.cir_bw.bw_alloc = 4199 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4200 buf->txqs[0].info.eir_bw.bw_profile_idx = 4201 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4202 buf->txqs[0].info.eir_bw.bw_alloc = 4203 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4204 4205 /* add the LAN queue */ 4206 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); 4207 if (status) { 4208 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n", 4209 le16_to_cpu(buf->txqs[0].txq_id), 4210 hw->adminq.sq_last_status); 4211 goto ena_txq_exit; 4212 } 4213 4214 node.node_teid = buf->txqs[0].q_teid; 4215 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4216 q_ctx->q_handle = q_handle; 4217 q_ctx->q_teid = le32_to_cpu(node.node_teid); 4218 4219 /* add a leaf node into scheduler tree queue layer */ 4220 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node); 4221 if (!status) 4222 status = ice_sched_replay_q_bw(pi, q_ctx); 4223 4224 ena_txq_exit: 4225 mutex_unlock(&pi->sched_lock); 4226 return status; 4227 } 4228 4229 /** 4230 * ice_dis_vsi_txq 4231 * @pi: port information structure 4232 * @vsi_handle: software VSI handle 4233 * @tc: TC number 4234 * @num_queues: number of queues 4235 * @q_handles: pointer to software queue handle array 4236 * @q_ids: pointer to the q_id array 4237 * @q_teids: pointer to queue node teids 4238 * @rst_src: if called due to reset, specifies the reset source 4239 * @vmvf_num: the relative VM or VF number that is undergoing the reset 4240 * @cd: pointer to command details structure or NULL 4241 * 4242 * This function removes queues and their corresponding nodes in SW DB 4243 */ 4244 enum ice_status 4245 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, 4246 u16 *q_handles, u16 *q_ids, u32 *q_teids, 4247 enum ice_disq_rst_src rst_src, u16 vmvf_num, 4248 struct ice_sq_cd *cd) 4249 { 4250 enum ice_status status = ICE_ERR_DOES_NOT_EXIST; 4251 struct ice_aqc_dis_txq_item *qg_list; 4252 struct ice_q_ctx *q_ctx; 4253 struct ice_hw *hw; 4254 u16 i, buf_size; 4255 4256 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4257 return ICE_ERR_CFG; 4258 4259 hw = pi->hw; 4260 4261 if (!num_queues) { 4262 /* if the queue is already disabled but the disable queue 4263 * command still has to be sent to complete the VF reset, 4264 * then call ice_aq_dis_lan_txq without any queue information 4265 */ 4266 if (rst_src) 4267 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, 4268 vmvf_num, NULL); 4269 return ICE_ERR_CFG; 4270 } 4271 4272 buf_size = struct_size(qg_list, q_id, 1); 4273 qg_list = kzalloc(buf_size, GFP_KERNEL); 4274 if (!qg_list) 4275 return ICE_ERR_NO_MEMORY; 4276 4277 mutex_lock(&pi->sched_lock); 4278 4279 for (i = 0; i < num_queues; i++) { 4280 struct ice_sched_node *node; 4281 4282 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); 4283 if (!node) 4284 continue; 4285 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); 4286 if (!q_ctx) { 4287 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n", 4288 q_handles[i]); 4289 continue; 4290 } 4291 if (q_ctx->q_handle != q_handles[i]) { 4292 ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n", 4293 q_ctx->q_handle, q_handles[i]); 4294 continue; 4295 } 4296 qg_list->parent_teid = node->info.parent_teid; 4297 qg_list->num_qs = 1; 
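/* reuse the single-entry qg_list buffer allocated above; each AQ call disables exactly one queue */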
4298 qg_list->q_id[0] = cpu_to_le16(q_ids[i]); 4299 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, 4300 vmvf_num, cd); 4301 4302 if (status) 4303 break; 4304 ice_free_sched_node(pi, node); 4305 q_ctx->q_handle = ICE_INVAL_Q_HANDLE; 4306 } 4307 mutex_unlock(&pi->sched_lock); 4308 kfree(qg_list); 4309 return status; 4310 } 4311 4312 /** 4313 * ice_cfg_vsi_qs - configure the new/existing VSI queues 4314 * @pi: port information structure 4315 * @vsi_handle: software VSI handle 4316 * @tc_bitmap: TC bitmap 4317 * @maxqs: max queues array per TC 4318 * @owner: LAN or RDMA 4319 * 4320 * This function adds/updates the VSI queues per TC. 4321 */ 4322 static enum ice_status 4323 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4324 u16 *maxqs, u8 owner) 4325 { 4326 enum ice_status status = 0; 4327 u8 i; 4328 4329 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4330 return ICE_ERR_CFG; 4331 4332 if (!ice_is_vsi_valid(pi->hw, vsi_handle)) 4333 return ICE_ERR_PARAM; 4334 4335 mutex_lock(&pi->sched_lock); 4336 4337 ice_for_each_traffic_class(i) { 4338 /* configuration is possible only if TC node is present */ 4339 if (!ice_sched_get_tc_node(pi, i)) 4340 continue; 4341 4342 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner, 4343 ice_is_tc_ena(tc_bitmap, i)); 4344 if (status) 4345 break; 4346 } 4347 4348 mutex_unlock(&pi->sched_lock); 4349 return status; 4350 } 4351 4352 /** 4353 * ice_cfg_vsi_lan - configure VSI LAN queues 4354 * @pi: port information structure 4355 * @vsi_handle: software VSI handle 4356 * @tc_bitmap: TC bitmap 4357 * @max_lanqs: max LAN queues array per TC 4358 * 4359 * This function adds/updates the VSI LAN queues per TC. 4360 */ 4361 enum ice_status 4362 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap, 4363 u16 *max_lanqs) 4364 { 4365 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs, 4366 ICE_SCHED_NODE_OWNER_LAN); 4367 } 4368 4369 /** 4370 * ice_cfg_vsi_rdma - configure the VSI RDMA queues 4371 * @pi: port information structure 4372 * @vsi_handle: software VSI handle 4373 * @tc_bitmap: TC bitmap 4374 * @max_rdmaqs: max RDMA queues array per TC 4375 * 4376 * This function adds/updates the VSI RDMA queues per TC. 
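 * Returns 0 on success, or a negative error code on failure.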
4377 */ 4378 int 4379 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4380 u16 *max_rdmaqs) 4381 { 4382 return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, 4383 max_rdmaqs, 4384 ICE_SCHED_NODE_OWNER_RDMA)); 4385 } 4386 4387 /** 4388 * ice_ena_vsi_rdma_qset 4389 * @pi: port information structure 4390 * @vsi_handle: software VSI handle 4391 * @tc: TC number 4392 * @rdma_qset: pointer to RDMA Qset 4393 * @num_qsets: number of RDMA Qsets 4394 * @qset_teid: pointer to Qset node TEIDs 4395 * 4396 * This function adds RDMA Qset 4397 */ 4398 int 4399 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4400 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4401 { 4402 struct ice_aqc_txsched_elem_data node = { 0 }; 4403 struct ice_aqc_add_rdma_qset_data *buf; 4404 struct ice_sched_node *parent; 4405 enum ice_status status; 4406 struct ice_hw *hw; 4407 u16 i, buf_size; 4408 int ret; 4409 4410 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4411 return -EIO; 4412 hw = pi->hw; 4413 4414 if (!ice_is_vsi_valid(hw, vsi_handle)) 4415 return -EINVAL; 4416 4417 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4418 buf = kzalloc(buf_size, GFP_KERNEL); 4419 if (!buf) 4420 return -ENOMEM; 4421 mutex_lock(&pi->sched_lock); 4422 4423 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4424 ICE_SCHED_NODE_OWNER_RDMA); 4425 if (!parent) { 4426 ret = -EINVAL; 4427 goto rdma_error_exit; 4428 } 4429 buf->parent_teid = parent->info.node_teid; 4430 node.parent_teid = parent->info.node_teid; 4431 4432 buf->num_qsets = cpu_to_le16(num_qsets); 4433 for (i = 0; i < num_qsets; i++) { 4434 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4435 buf->rdma_qsets[i].info.valid_sections = 4436 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4437 ICE_AQC_ELEM_VALID_EIR; 4438 buf->rdma_qsets[i].info.generic = 0; 4439 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4440 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4441 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4442 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4443 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4444 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4445 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4446 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4447 } 4448 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4449 if (ret) { 4450 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4451 goto rdma_error_exit; 4452 } 4453 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4454 for (i = 0; i < num_qsets; i++) { 4455 node.node_teid = buf->rdma_qsets[i].qset_teid; 4456 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4457 &node); 4458 if (status) { 4459 ret = ice_status_to_errno(status); 4460 break; 4461 } 4462 qset_teid[i] = le32_to_cpu(node.node_teid); 4463 } 4464 rdma_error_exit: 4465 mutex_unlock(&pi->sched_lock); 4466 kfree(buf); 4467 return ret; 4468 } 4469 4470 /** 4471 * ice_dis_vsi_rdma_qset - free RDMA resources 4472 * @pi: port_info struct 4473 * @count: number of RDMA Qsets to free 4474 * @qset_teid: TEID of Qset node 4475 * @q_id: list of queue IDs being disabled 4476 */ 4477 int 4478 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4479 u16 *q_id) 4480 { 4481 struct ice_aqc_dis_txq_item *qg_list; 4482 enum ice_status status = 0; 4483 struct ice_hw *hw; 4484 u16 qg_size; 4485 int i; 4486 4487 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4488 return -EIO; 4489 4490 hw = pi->hw; 4491 4492 qg_size = struct_size(qg_list, q_id, 1); 4493 qg_list = 
kzalloc(qg_size, GFP_KERNEL); 4494 if (!qg_list) 4495 return -ENOMEM; 4496 4497 mutex_lock(&pi->sched_lock); 4498 4499 for (i = 0; i < count; i++) { 4500 struct ice_sched_node *node; 4501 4502 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]); 4503 if (!node) 4504 continue; 4505 4506 qg_list->parent_teid = node->info.parent_teid; 4507 qg_list->num_qs = 1; 4508 qg_list->q_id[0] = 4509 cpu_to_le16(q_id[i] | 4510 ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET); 4511 4512 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size, 4513 ICE_NO_RESET, 0, NULL); 4514 if (status) 4515 break; 4516 4517 ice_free_sched_node(pi, node); 4518 } 4519 4520 mutex_unlock(&pi->sched_lock); 4521 kfree(qg_list); 4522 return ice_status_to_errno(status); 4523 } 4524 4525 /** 4526 * ice_replay_pre_init - replay pre-initialization 4527 * @hw: pointer to the HW struct 4528 * 4529 * Initializes required config data for VSI, FD, ACL, and RSS before replay. 4530 */ 4531 static enum ice_status ice_replay_pre_init(struct ice_hw *hw) 4532 { 4533 struct ice_switch_info *sw = hw->switch_info; 4534 u8 i; 4535 4536 /* Delete old entries from the replay filter list head if there are any */ 4537 ice_rm_all_sw_replay_rule_info(hw); 4538 /* At the start of replay, move entries into the replay_rules list; 4539 * this allows adding rule entries back to the filt_rules list, 4540 * which is the operational list. 4541 */ 4542 for (i = 0; i < ICE_SW_LKUP_LAST; i++) 4543 list_replace_init(&sw->recp_list[i].filt_rules, 4544 &sw->recp_list[i].filt_replay_rules); 4545 ice_sched_replay_agg_vsi_preinit(hw); 4546 4547 return 0; 4548 } 4549 4550 /** 4551 * ice_replay_vsi - replay VSI configuration 4552 * @hw: pointer to the HW struct 4553 * @vsi_handle: driver VSI handle 4554 * 4555 * Restore all VSI configuration after reset. It is required to call this 4556 * function with the main VSI first. 4557 */ 4558 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle) 4559 { 4560 enum ice_status status; 4561 4562 if (!ice_is_vsi_valid(hw, vsi_handle)) 4563 return ICE_ERR_PARAM; 4564 4565 /* Replay pre-initialization if there is any */ 4566 if (vsi_handle == ICE_MAIN_VSI_HANDLE) { 4567 status = ice_replay_pre_init(hw); 4568 if (status) 4569 return status; 4570 } 4571 /* Replay per VSI all RSS configurations */ 4572 status = ice_replay_rss_cfg(hw, vsi_handle); 4573 if (status) 4574 return status; 4575 /* Replay per VSI all filters */ 4576 status = ice_replay_vsi_all_fltr(hw, vsi_handle); 4577 if (!status) 4578 status = ice_replay_vsi_agg(hw, vsi_handle); 4579 return status; 4580 } 4581 4582 /** 4583 * ice_replay_post - post replay configuration cleanup 4584 * @hw: pointer to the HW struct 4585 * 4586 * Post replay cleanup. 4587 */ 4588 void ice_replay_post(struct ice_hw *hw) 4589 { 4590 /* Delete old entries from the replay filter list head */ 4591 ice_rm_all_sw_replay_rule_info(hw); 4592 ice_sched_replay_agg(hw); 4593 } 4594 4595 /** 4596 * ice_stat_update40 - read 40 bit stat from the chip and update stat values 4597 * @hw: ptr to the hardware info 4598 * @reg: offset of 64 bit HW register to read from 4599 * @prev_stat_loaded: bool to specify if previous stats are loaded 4600 * @prev_stat: ptr to previously loaded stat value 4601 * @cur_stat: ptr to current stat value 4602 */ 4603 void 4604 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 4605 u64 *prev_stat, u64 *cur_stat) 4606 { 4607 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1); 4608 4609 /* device stats are not reset at PFR, so they likely will not be zeroed 4610 * when the driver starts. 
Thus, save the value from the first read 4611 * without adding to the statistic value so that we report stats which 4612 * count up from zero. 4613 */ 4614 if (!prev_stat_loaded) { 4615 *prev_stat = new_data; 4616 return; 4617 } 4618 4619 /* Calculate the difference between the new and old values, and then 4620 * add it to the software stat value. 4621 */ 4622 if (new_data >= *prev_stat) 4623 *cur_stat += new_data - *prev_stat; 4624 else 4625 /* to manage the potential roll-over */ 4626 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat; 4627 4628 /* Update the previously stored value to prepare for next read */ 4629 *prev_stat = new_data; 4630 } 4631 4632 /** 4633 * ice_stat_update32 - read 32 bit stat from the chip and update stat values 4634 * @hw: ptr to the hardware info 4635 * @reg: offset of HW register to read from 4636 * @prev_stat_loaded: bool to specify if previous stats are loaded 4637 * @prev_stat: ptr to previous loaded stat value 4638 * @cur_stat: ptr to current stat value 4639 */ 4640 void 4641 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 4642 u64 *prev_stat, u64 *cur_stat) 4643 { 4644 u32 new_data; 4645 4646 new_data = rd32(hw, reg); 4647 4648 /* device stats are not reset at PFR, they likely will not be zeroed 4649 * when the driver starts. Thus, save the value from the first read 4650 * without adding to the statistic value so that we report stats which 4651 * count up from zero. 4652 */ 4653 if (!prev_stat_loaded) { 4654 *prev_stat = new_data; 4655 return; 4656 } 4657 4658 /* Calculate the difference between the new and old values, and then 4659 * add it to the software stat value. 4660 */ 4661 if (new_data >= *prev_stat) 4662 *cur_stat += new_data - *prev_stat; 4663 else 4664 /* to manage the potential roll-over */ 4665 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat; 4666 4667 /* Update the previously stored value to prepare for next read */ 4668 *prev_stat = new_data; 4669 } 4670 4671 /** 4672 * ice_sched_query_elem - query element information from HW 4673 * @hw: pointer to the HW struct 4674 * @node_teid: node TEID to be queried 4675 * @buf: buffer to element information 4676 * 4677 * This function queries HW element information 4678 */ 4679 enum ice_status 4680 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 4681 struct ice_aqc_txsched_elem_data *buf) 4682 { 4683 u16 buf_size, num_elem_ret = 0; 4684 enum ice_status status; 4685 4686 buf_size = sizeof(*buf); 4687 memset(buf, 0, buf_size); 4688 buf->node_teid = cpu_to_le32(node_teid); 4689 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, 4690 NULL); 4691 if (status || num_elem_ret != 1) 4692 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 4693 return status; 4694 } 4695 4696 /** 4697 * ice_aq_set_driver_param - Set driver parameter to share via firmware 4698 * @hw: pointer to the HW struct 4699 * @idx: parameter index to set 4700 * @value: the value to set the parameter to 4701 * @cd: pointer to command details structure or NULL 4702 * 4703 * Set the value of one of the software defined parameters. All PFs connected 4704 * to this device can read the value using ice_aq_get_driver_param. 4705 * 4706 * Note that firmware provides no synchronization or locking, and will not 4707 * save the parameter value during a device reset. It is expected that 4708 * a single PF will write the parameter value, while all other PFs will only 4709 * read it. 
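 * Returns 0 on success, or a negative error code on failure.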
4710 */ 4711 int 4712 ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 4713 u32 value, struct ice_sq_cd *cd) 4714 { 4715 struct ice_aqc_driver_shared_params *cmd; 4716 struct ice_aq_desc desc; 4717 4718 if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 4719 return -EIO; 4720 4721 cmd = &desc.params.drv_shared_params; 4722 4723 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 4724 4725 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET; 4726 cmd->param_indx = idx; 4727 cmd->param_val = cpu_to_le32(value); 4728 4729 return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd)); 4730 } 4731 4732 /** 4733 * ice_aq_get_driver_param - Get driver parameter shared via firmware 4734 * @hw: pointer to the HW struct 4735 * @idx: parameter index to get 4736 * @value: storage to return the shared parameter 4737 * @cd: pointer to command details structure or NULL 4738 * 4739 * Get the value of one of the software defined parameters. 4740 * 4741 * Note that firmware provides no synchronization or locking. It is expected 4742 * that only a single PF will write a given parameter. 4743 */ 4744 int 4745 ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx, 4746 u32 *value, struct ice_sq_cd *cd) 4747 { 4748 struct ice_aqc_driver_shared_params *cmd; 4749 struct ice_aq_desc desc; 4750 enum ice_status status; 4751 4752 if (idx >= ICE_AQC_DRIVER_PARAM_MAX) 4753 return -EIO; 4754 4755 cmd = &desc.params.drv_shared_params; 4756 4757 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params); 4758 4759 cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET; 4760 cmd->param_indx = idx; 4761 4762 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 4763 if (status) 4764 return ice_status_to_errno(status); 4765 4766 *value = le32_to_cpu(cmd->param_val); 4767 4768 return 0; 4769 } 4770 4771 /** 4772 * ice_fw_supports_link_override 4773 * @hw: pointer to the hardware structure 4774 * 4775 * Checks if the firmware supports link override 4776 */ 4777 bool ice_fw_supports_link_override(struct ice_hw *hw) 4778 { 4779 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { 4780 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) 4781 return true; 4782 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && 4783 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) 4784 return true; 4785 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { 4786 return true; 4787 } 4788 4789 return false; 4790 } 4791 4792 /** 4793 * ice_get_link_default_override 4794 * @ldo: pointer to the link default override struct 4795 * @pi: pointer to the port info struct 4796 * 4797 * Gets the link default override for a port 4798 */ 4799 enum ice_status 4800 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, 4801 struct ice_port_info *pi) 4802 { 4803 u16 i, tlv, tlv_len, tlv_start, buf, offset; 4804 struct ice_hw *hw = pi->hw; 4805 enum ice_status status; 4806 4807 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, 4808 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); 4809 if (status) { 4810 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n"); 4811 return status; 4812 } 4813 4814 /* Each port has its own config; calculate for our port */ 4815 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + 4816 ICE_SR_PFA_LINK_OVERRIDE_OFFSET; 4817 4818 /* link options first */ 4819 status = ice_read_sr_word(hw, tlv_start, &buf); 4820 if (status) { 4821 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 4822 return status; 4823 } 4824 ldo->options = buf 
& ICE_LINK_OVERRIDE_OPT_M; 4825 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> 4826 ICE_LINK_OVERRIDE_PHY_CFG_S; 4827 4828 /* link PHY config */ 4829 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; 4830 status = ice_read_sr_word(hw, offset, &buf); 4831 if (status) { 4832 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n"); 4833 return status; 4834 } 4835 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; 4836 4837 /* PHY types low */ 4838 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; 4839 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 4840 status = ice_read_sr_word(hw, (offset + i), &buf); 4841 if (status) { 4842 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 4843 return status; 4844 } 4845 /* shift 16 bits at a time to fill 64 bits */ 4846 ldo->phy_type_low |= ((u64)buf << (i * 16)); 4847 } 4848 4849 /* PHY types high */ 4850 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + 4851 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; 4852 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { 4853 status = ice_read_sr_word(hw, (offset + i), &buf); 4854 if (status) { 4855 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n"); 4856 return status; 4857 } 4858 /* shift 16 bits at a time to fill 64 bits */ 4859 ldo->phy_type_high |= ((u64)buf << (i * 16)); 4860 } 4861 4862 return status; 4863 } 4864 4865 /** 4866 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled 4867 * @caps: get PHY capability data 4868 */ 4869 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) 4870 { 4871 if (caps->caps & ICE_AQC_PHY_AN_MODE || 4872 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | 4873 ICE_AQC_PHY_AN_EN_CLAUSE73 | 4874 ICE_AQC_PHY_AN_EN_CLAUSE37)) 4875 return true; 4876 4877 return false; 4878 } 4879 4880 /** 4881 * ice_aq_set_lldp_mib - Set the LLDP MIB 4882 * @hw: pointer to the HW struct 4883 * @mib_type: Local, Remote or both Local and Remote MIBs 4884 * @buf: pointer to the caller-supplied buffer to store the MIB block 4885 * @buf_size: size of the buffer (in bytes) 4886 * @cd: pointer to command details structure or NULL 4887 * 4888 * Set the LLDP MIB. 
(0x0A08). 4889 */ 4890 enum ice_status 4891 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 4892 struct ice_sq_cd *cd) 4893 { 4894 struct ice_aqc_lldp_set_local_mib *cmd; 4895 struct ice_aq_desc desc; 4896 4897 cmd = &desc.params.lldp_set_mib; 4898 4899 if (buf_size == 0 || !buf) 4900 return ICE_ERR_PARAM; 4901 4902 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); 4903 4904 desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); 4905 desc.datalen = cpu_to_le16(buf_size); 4906 4907 cmd->type = mib_type; 4908 cmd->length = cpu_to_le16(buf_size); 4909 4910 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 4911 } 4912 4913 /** 4914 * ice_fw_supports_lldp_fltr_ctrl - check if FW API version supports lldp_fltr_ctrl 4915 * @hw: pointer to HW struct 4916 */ 4917 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) 4918 { 4919 if (hw->mac_type != ICE_MAC_E810) 4920 return false; 4921 4922 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) { 4923 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN) 4924 return true; 4925 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN && 4926 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH) 4927 return true; 4928 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) { 4929 return true; 4930 } 4931 return false; 4932 } 4933 4934 /** 4935 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter 4936 * @hw: pointer to HW struct 4937 * @vsi_num: absolute HW index for VSI 4938 * @add: true to add a filter, false to remove it 4939 */ 4940 enum ice_status 4941 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) 4942 { 4943 struct ice_aqc_lldp_filter_ctrl *cmd; 4944 struct ice_aq_desc desc; 4945 4946 cmd = &desc.params.lldp_filter_ctrl; 4947 4948 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); 4949 4950 if (add) 4951 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD; 4952 else 4953 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; 4954 4955 cmd->vsi_num = cpu_to_le16(vsi_num); 4956 4957 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 4958 } 4959 4960 /** 4961 * ice_fw_supports_report_dflt_cfg 4962 * @hw: pointer to the hardware structure 4963 * 4964 * Checks if the firmware supports reporting the default configuration 4965 */ 4966 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw) 4967 { 4968 if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) { 4969 if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN) 4970 return true; 4971 if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN && 4972 hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH) 4973 return true; 4974 } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) { 4975 return true; 4976 } 4977 return false; 4978 } 4979
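/* Usage sketch (illustrative only, not part of the driver): the FW
 * capability helpers above are typically used to gate optional AQ behavior.
 * For example, a hypothetical caller holding a struct ice_port_info could
 * request the NVM default PHY configuration only when the firmware
 * advertises support for it, and fall back to the active configuration
 * otherwise (report modes as defined in ice_adminq_cmd.h):
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	enum ice_status status;
 *
 *	if (ice_fw_supports_report_dflt_cfg(pi->hw))
 *		status = ice_aq_get_phy_caps(pi, false,
 *					     ICE_AQC_REPORT_DFLT_CFG,
 *					     &pcaps, NULL);
 *	else
 *		status = ice_aq_get_phy_caps(pi, false,
 *					     ICE_AQC_REPORT_ACTIVE_CFG,
 *					     &pcaps, NULL);
 */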