// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"
#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E823C_10G_BASE_T:
	case ICE_DEV_ID_E823C_BACKPLANE:
	case ICE_DEV_ID_E823C_QSFP:
	case ICE_DEV_ID_E823C_SFP:
	case ICE_DEV_ID_E823C_SGMII:
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return 0;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
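
/* ice_clear_pf_cfg() shows the minimal pattern used by every "direct" (no
 * data buffer) admin queue command in this file: fill a default descriptor
 * with the opcode, then post it with a NULL buffer. A hedged sketch of a
 * hypothetical caller:
 *
 *	enum ice_status status;
 *
 *	status = ice_clear_pf_cfg(hw);
 *	if (status)
 *		dev_warn(ice_hw_to_dev(hw),
 *			 "clear PF config failed, status %d\n", status);
 */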

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return the per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in the user-specified buffer; interpret that buffer as a
 * "manage_mac_read" response. The addresses from the response are also
 * stored in the HW struct (port_info->mac).
 * ice_discover_dev_caps is expected to be called before this function is
 * called.
 */
static enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = buf;
	flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ether_addr_copy(hw->port_info->mac.lan_addr,
					resp[i].mac_addr);
			ether_addr_copy(hw->port_info->mac.perm_addr,
					resp[i].mac_addr);
			break;
		}

	return 0;
}
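
/* Hedged usage sketch (mirrors the call made from ice_init_hw() below): the
 * caller allocates room for the maximum of two response entries, since a
 * single port can report both a LAN and a WoL address:
 *
 *	enum ice_status status;
 *	u16 len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
 *	void *buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
 *				 sizeof(struct ice_aqc_manage_mac_read_resp),
 *				 GFP_KERNEL);
 *
 *	if (buf) {
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 *		devm_kfree(ice_hw_to_dev(hw), buf);
 *	}
 */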

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
	    !ice_fw_supports_report_dflt_cfg(hw))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= cpu_to_le16(report_mode);
	status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);

	ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
		  report_mode);
	ice_debug(hw, ICE_DBG_LINK, "   phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "   phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "   caps = 0x%x\n", pcaps->caps);
	ice_debug(hw, ICE_DBG_LINK, "   low_power_ctrl_an = 0x%x\n",
		  pcaps->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "   eee_cap = 0x%x\n", pcaps->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "   eeer_value = 0x%x\n",
		  pcaps->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "   link_fec_options = 0x%x\n",
		  pcaps->link_fec_options);
	ice_debug(hw, ICE_DBG_LINK, "   module_compliance_enforcement = 0x%x\n",
		  pcaps->module_compliance_enforcement);
	ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
		  pcaps->extended_compliance_code);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
		  pcaps->module_type[0]);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
		  pcaps->module_type[1]);
	ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
		  pcaps->module_type[2]);

	if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
		pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
		pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
		memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
		       sizeof(pi->phy.link_info.module_type));
	}

	return status;
}

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}
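
/* Hedged usage sketch: querying the media-capable PHY types into a
 * caller-owned buffer (this mirrors the ICE_AQC_REPORT_TOPO_CAP_MEDIA call
 * that ice_init_hw() makes further down; "use_caps" is a hypothetical
 * consumer):
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
 *	if (pcaps &&
 *	    !ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
 *				 pcaps, NULL))
 *		use_caps(pcaps);	// pi->phy.phy_type_low/high now cached
 *	kfree(pcaps);
 */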

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		     hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
		     ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ICE_MEDIA_DA;

		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
		case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
		case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
		case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
		case ICE_PHY_TYPE_HIGH_100G_CAUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			fallthrough;
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
		case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
			return ICE_MEDIA_FIBER;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;
	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
	if (status)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "get link info\n");
	ice_debug(hw, ICE_DBG_LINK, "   link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "   phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "   phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "   media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "   link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "   link_cfg_err = 0x%x\n", li->link_cfg_err);
	ice_debug(hw, ICE_DBG_LINK, "   an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "   ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "   fec_info = 0x%x\n", li->fec_info);
	ice_debug(hw, ICE_DBG_LINK, "   lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "   max_frame = 0x%x\n",
		  li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "   pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return 0;
}
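
/* Hedged usage sketch: refresh the cached link state and test for link-up
 * (ICE_AQ_LINK_UP is the link_info bit; "link_is_up" is a hypothetical
 * local):
 *
 *	if (!ice_aq_get_link_info(pi, false, NULL, NULL) &&
 *	    (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		link_is_up = true;
 */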

/**
 * ice_fill_tx_timer_and_fc_thresh
 * @hw: pointer to the HW struct
 * @cmd: pointer to MAC cfg structure
 *
 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
 * descriptor
 */
static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
				struct ice_aqc_set_mac_cfg *cmd)
{
	u16 fc_thres_val, tx_timer_val;
	u32 val;

	/* We read back the transmit timer and FC threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and FC
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);

	/* Retrieve the FC threshold */
	val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;

	cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
}

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);

	ice_fill_tx_timer_and_fc_thresh(hw, cmd);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
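
/* Hedged usage sketch: ice_init_hw() below enables jumbo frame support at
 * the MAC level by passing the driver-wide maximum frame size:
 *
 *	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
 */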

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;
	enum ice_status status;

	hw->switch_info = devm_kzalloc(ice_hw_to_dev(hw),
				       sizeof(*hw->switch_info), GFP_KERNEL);
	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	status = ice_init_def_sw_recp(hw);
	if (status) {
		devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
		return status;
	}
	return 0;
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 list_entry) {
		list_del(&v_pos_map->list_entry);
		devm_kfree(ice_hw_to_dev(hw), v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
		struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

		recps[i].root_rid = i;
		mutex_destroy(&recps[i].filt_rule_lock);
		list_for_each_entry_safe(lst_itr, tmp_entry,
					 &recps[i].filt_rules, list_entry) {
			list_del(&lst_itr->list_entry);
			devm_kfree(ice_hw_to_dev(hw), lst_itr);
		}
	}
	ice_rm_all_sw_replay_rule_info(hw);
	devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
	devm_kfree(ice_hw_to_dev(hw), sw);
}

/**
 * ice_get_fw_log_cfg - get FW logging configuration
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *config;
	u16 size;

	size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
	config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
	if (!config)
		return ICE_ERR_NO_MEMORY;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);

	status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
	if (!status) {
		u16 i;

		/* Save FW logging information into the HW structure */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 v, m, flgs;

			v = le16_to_cpu(config[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;

			if (m < ICE_AQC_FW_LOG_ID_MAX)
				hw->fw_log.evnts[m].cur = flgs;
		}
	}

	devm_kfree(ice_hw_to_dev(hw), config);

	return status;
}

/**
 * ice_cfg_fw_log - configure FW logging
 * @hw: pointer to the HW struct
 * @enable: enable certain FW logging events if true, disable all if false
 *
 * This function enables/disables the FW logging via Rx CQ events and a UART
 * port based on predetermined configurations. FW logging via the Rx CQ can be
 * enabled/disabled for individual PFs. However, FW logging via the UART can
 * only be enabled/disabled for all PFs on the same device.
 *
 * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in
 * hw->fw_log need to be set accordingly, e.g. based on user-provided input,
 * before initializing the device.
 *
 * When re/configuring FW logging, callers need to update the "cfg" elements of
 * the hw->fw_log.evnts array with the desired logging event configurations for
 * modules of interest. When disabling FW logging completely, the callers can
 * just pass false in the "enable" parameter. On completion, the function will
 * update the "cur" element of the hw->fw_log.evnts array with the resulting
 * logging event configurations of the modules that are being re/configured. FW
 * logging modules that are not part of a reconfiguration operation retain their
 * previous states.
 *
 * Before resetting the device, it is recommended that the driver disables FW
 * logging before shutting down the control queue. When disabling FW logging
 * ("enable" = false), the latest configurations of FW logging events stored in
 * hw->fw_log.evnts[] are not overridden, which allows them to be reconfigured
 * after a device reset.
 *
 * When enabling FW logging to emit log messages via the Rx CQ during the
 * device's initialization phase, a mechanism alternative to interrupt handlers
 * needs to be used to extract FW log messages from the Rx CQ periodically and
 * to prevent the Rx CQ from being full and stalling other types of control
 * messages from FW to SW. Interrupts are typically disabled during the device's
 * initialization phase.
 */
static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
{
	struct ice_aqc_fw_logging *cmd;
	enum ice_status status = 0;
	u16 i, chgs = 0, len = 0;
	struct ice_aq_desc desc;
	__le16 *data = NULL;
	u8 actv_evnts = 0;
	void *buf = NULL;

	if (!hw->fw_log.cq_en && !hw->fw_log.uart_en)
		return 0;

	/* Disable FW logging only when the control queue is still responsive */
	if (!enable &&
	    (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq)))
		return 0;

	/* Get current FW log settings */
	status = ice_get_fw_log_cfg(hw);
	if (status)
		return status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging);
	cmd = &desc.params.fw_logging;

	/* Indicate which controls are valid */
	if (hw->fw_log.cq_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;

	if (hw->fw_log.uart_en)
		cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID;

	if (enable) {
		/* Fill in an array of entries with FW logging modules and
		 * logging events being reconfigured.
		 */
		for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
			u16 val;

			/* Keep track of enabled event types */
			actv_evnts |= hw->fw_log.evnts[i].cfg;

			if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur)
				continue;

			if (!data) {
				data = devm_kcalloc(ice_hw_to_dev(hw),
						    ICE_AQC_FW_LOG_ID_MAX,
						    sizeof(*data),
						    GFP_KERNEL);
				if (!data)
					return ICE_ERR_NO_MEMORY;
			}

			val = i << ICE_AQC_FW_LOG_ID_S;
			val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
			data[chgs++] = cpu_to_le16(val);
		}

		/* Only enable FW logging if at least one module is specified.
		 * If FW logging is currently enabled but all modules are not
		 * enabled to emit log messages, disable FW logging altogether.
		 */
		if (actv_evnts) {
			/* Leave if there is effectively no change */
			if (!chgs)
				goto out;

			if (hw->fw_log.cq_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN;

			if (hw->fw_log.uart_en)
				cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;

			buf = data;
			len = sizeof(*data) * chgs;
			desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
		}
	}

	status = ice_aq_send_cmd(hw, &desc, buf, len, NULL);
	if (!status) {
		/* Update the current configuration to reflect events enabled.
		 * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW
		 * logging mode is enabled for the device. They do not reflect
		 * actual modules being enabled to emit log messages. So, their
		 * values remain unchanged even when all modules are disabled.
		 */
		u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX;

		hw->fw_log.actv_evnts = actv_evnts;
		for (i = 0; i < cnt; i++) {
			u16 v, m;

			if (!enable) {
				/* When disabling all FW logging events as part
				 * of device's de-initialization, the original
				 * configurations are retained, and can be used
				 * to reconfigure FW logging later if the device
				 * is re-initialized.
				 */
				hw->fw_log.evnts[i].cur = 0;
				continue;
			}

			v = le16_to_cpu(data[i]);
			m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
			hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
		}
	}

out:
	if (data)
		devm_kfree(ice_hw_to_dev(hw), data);

	return status;
}
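
/* Hedged configuration sketch: a caller that wants FW log messages on the Rx
 * CQ sets the queue enable bit and the per-module "cfg" event masks in
 * hw->fw_log before ice_init_hw() runs ("i" and "evt_mask" are illustrative
 * placeholders for a module index and its event-type bits):
 *
 *	hw->fw_log.cq_en = true;
 *	hw->fw_log.evnts[i].cfg = evt_mask;
 *	// ice_init_hw() then calls ice_cfg_fw_log(hw, true)
 */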

/**
 * ice_output_fw_log
 * @hw: pointer to the HW struct
 * @desc: pointer to the AQ message descriptor
 * @buf: pointer to the buffer accompanying the AQ message
 *
 * Formats a FW Log message and outputs it via the standard driver logs.
 */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
	ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
			le16_to_cpu(desc->datalen));
	ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNC_NUM_M) >>
		PF_FUNC_RID_FUNC_NUM_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Enable FW logging. Not fatal if this fails. */
	status = ice_cfg_fw_log(hw, true);
	if (status)
		ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n");

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	/* Set bit to enable Flow Director filters */
	wr32(hw, PFQF_FD_ENA, PFQF_FD_ENA_FD_ENA_M);
	INIT_LIST_HEAD(&hw->fdir_list_head);

	ice_clear_pxe_mode(hw);

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
				     sizeof(*hw->port_info), GFP_KERNEL);
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;

	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
				     NULL);
	devm_kfree(ice_hw_to_dev(hw), pcaps);
	if (status)
		dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
			 status);

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;

	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);

	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
			       sizeof(struct ice_aqc_manage_mac_read_resp),
			       GFP_KERNEL);
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	devm_kfree(ice_hw_to_dev(hw), mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* enable jumbo frame support at MAC level */
	status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	/* Obtain counter base index which would be used by flow director */
	status = ice_alloc_fd_res_cntr(hw, &hw->fd_ctr_base);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	mutex_init(&hw->tnl_lock);
	return 0;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	devm_kfree(ice_hw_to_dev(hw), hw->port_info);
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}
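
/* Hedged caller sketch: probe-time pairing of ice_init_hw() with
 * ice_deinit_hw() (the probe/remove paths in ice_main.c do essentially this;
 * the error mapping is illustrative):
 *
 *	status = ice_init_hw(hw);
 *	if (status)
 *		return -EIO;
 *	// ... nominal operation ...
 *	ice_deinit_hw(hw);
 */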

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_free_fd_res_cntr(hw, hw->fd_ctr_base);
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);
	ice_free_seg(hw);
	ice_free_hw_tbls(hw);
	mutex_destroy(&hw->tnl_lock);

	if (hw->port_info) {
		devm_kfree(ice_hw_to_dev(hw), hw->port_info);
		hw->port_info = NULL;
	}

	/* Attempt to disable FW logging before shutting down control queues */
	ice_cfg_fw_log(hw, false);
	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_timeout, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
			GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_timeout; cnt++) {
		mdelay(100);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_timeout) {
		ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
					  GLNVM_ULD_PE_DONE_M : 0);

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
			break;
		}
		mdelay(10);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}
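
/* Worked example of the grst_timeout arithmetic in ice_check_reset() above
 * (register value illustrative): if GLGEN_RSTCTL reports a GRSTDEL of 35,
 * the hardware delay is 35 * 100ms = 3.5s, and grst_timeout becomes
 * 35 + 10 = 45 iterations of the 100ms poll loop, i.e. 4.5s total: the
 * hardware delay plus 1s of headroom for outstanding AQ commands.
 */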

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return 0;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	/* Wait for the PFR to complete. The wait time is the global config lock
	 * timeout plus the PFR timeout which will account for a possible reset
	 * that is occurring during a download package operation.
	 */
	for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
	     ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		mdelay(1);
	}

	/* Compare against the full loop bound; checking only
	 * ICE_PF_RESET_WAIT_COUNT would miss timeouts in the extended window.
	 */
	if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return 0;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return 0;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
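
/* Hedged usage sketch: a caller fills the sparse struct ice_rlan_ctx and
 * lets ice_write_rxq_ctx() pack and program it (field values are
 * illustrative; the real setup lives in ice_setup_rx_ctx()):
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *	enum ice_status status;
 *
 *	rlan_ctx.base = ring_dma >> 7;	// descriptor base in 128-byte units
 *	rlan_ctx.qlen = ring_count;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	// data buffer, 128-byte units
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */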

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};
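
/* The (width, LSB) pairs in the context tables above describe where each
 * sparse field lands in the dense, little-endian context image produced by
 * ice_set_ctx(). Worked example from the Tx table: qlen has width 13 and
 * LSB 135, so its 13 bits occupy absolute bit positions 135-147 of the
 * image, i.e. they start at bit 7 of byte 16 and spill into bytes 17 and 18.
 */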

/* FW Admin Queue command wrappers */

/* Software lock/mutex that is meant to be held while the Global Config Lock
 * in firmware is acquired by the software to prevent most (but not all) types
 * of AQ commands from being sent to FW
 */
DEFINE_MUTEX(ice_global_cfg_lock_sw);

/**
 * ice_should_retry_sq_send_cmd
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	switch (opcode) {
	case ice_aqc_opc_get_link_topo:
	case ice_aqc_opc_lldp_stop:
	case ice_aqc_opc_lldp_start:
	case ice_aqc_opc_lldp_filter_ctrl:
		return true;
	}

	return false;
}

/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);

	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	kfree(buf_cpy);

	return status;
}

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd = &desc->params.res_owner;
	bool lock_acquired = false;
	enum ice_status status;

	/* When a package download is in process (i.e. when the firmware's
	 * Global Configuration Lock resource is held), only the Download
	 * Package, Get Version, Get Package Info List and Release Resource
	 * (with resource ID set to Global Config Lock) AdminQ commands are
	 * allowed; all others must block until the package download completes
	 * and the Global Config Lock is released. See also
	 * ice_acquire_global_cfg_lock().
	 */
	switch (le16_to_cpu(desc->opcode)) {
	case ice_aqc_opc_download_pkg:
	case ice_aqc_opc_get_pkg_info_list:
	case ice_aqc_opc_get_ver:
		break;
	case ice_aqc_opc_release_res:
		if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
			break;
		fallthrough;
	default:
		mutex_lock(&ice_global_cfg_lock_sw);
		lock_acquired = true;
		break;
	}

	status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
	if (lock_acquired)
		mutex_unlock(&ice_global_cfg_lock_sw);

	return status;
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = le32_to_cpu(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       isascii(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}
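
/* Hedged usage sketch: after ice_aq_get_fw_ver() above succeeds, the version
 * fields cached in the HW struct can be reported directly:
 *
 *	if (!ice_aq_get_fw_ver(hw, NULL))
 *		dev_info(ice_hw_to_dev(hw), "fw %u.%u.%u api %u.%u\n",
 *			 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
 *			 hw->api_maj_ver, hw->api_min_ver);
 */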

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) 0 -                  acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = cpu_to_le16(res);
	cmd_resp->access_type = cpu_to_le16(access);
	cmd_resp->res_number = cpu_to_le32(sdp_number);
	cmd_resp->timeout = cpu_to_le32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return 0;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = le32_to_cpu(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (le16_to_cpu(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = le32_to_cpu(cmd_resp->timeout);

	return status;
}

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = cpu_to_le16(res);
	cmd->res_number = cpu_to_le32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner times out */
	timeout = time_left;
	while (status && timeout && time_left) {
		mdelay(delay);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		mdelay(1);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}
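
/* Hedged usage sketch: the canonical acquire/use/release pattern, here with
 * the NVM resource and a read lock (the 3000ms timeout is illustrative, not
 * the driver's actual NVM timeout constant):
 *
 *	if (!ice_acquire_res(hw, ICE_NVM_RES_ID, ICE_RES_READ, 3000)) {
 *		// ... access the shared resource ...
 *		ice_release_res(hw, ICE_NVM_RES_ID);
 *	}
 */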
*/ 1778 buf->num_elems = cpu_to_le16(num); 1779 buf->res_type = cpu_to_le16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED | 1780 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX); 1781 if (btm) 1782 buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM); 1783 1784 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len, 1785 ice_aqc_opc_alloc_res, NULL); 1786 if (status) 1787 goto ice_alloc_res_exit; 1788 1789 memcpy(res, buf->elem, sizeof(*buf->elem) * num); 1790 1791 ice_alloc_res_exit: 1792 kfree(buf); 1793 return status; 1794 } 1795 1796 /** 1797 * ice_free_hw_res - free allocated HW resource 1798 * @hw: pointer to the HW struct 1799 * @type: type of resource to free 1800 * @num: number of resources 1801 * @res: pointer to array that contains the resources to free 1802 */ 1803 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) 1804 { 1805 struct ice_aqc_alloc_free_res_elem *buf; 1806 enum ice_status status; 1807 u16 buf_len; 1808 1809 buf_len = struct_size(buf, elem, num); 1810 buf = kzalloc(buf_len, GFP_KERNEL); 1811 if (!buf) 1812 return ICE_ERR_NO_MEMORY; 1813 1814 /* Prepare buffer to free resource. */ 1815 buf->num_elems = cpu_to_le16(num); 1816 buf->res_type = cpu_to_le16(type); 1817 memcpy(buf->elem, res, sizeof(*buf->elem) * num); 1818 1819 status = ice_aq_alloc_free_res(hw, num, buf, buf_len, 1820 ice_aqc_opc_free_res, NULL); 1821 if (status) 1822 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n"); 1823 1824 kfree(buf); 1825 return status; 1826 } 1827 1828 /** 1829 * ice_get_num_per_func - determine number of resources per PF 1830 * @hw: pointer to the HW structure 1831 * @max: value to be evenly split between each PF 1832 * 1833 * Determine the number of valid functions by going through the bitmap returned 1834 * from parsing capabilities and use this to calculate the number of resources 1835 * per PF based on the max value passed in. 1836 */ 1837 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) 1838 { 1839 u8 funcs; 1840 1841 #define ICE_CAPS_VALID_FUNCS_M 0xFF 1842 funcs = hweight8(hw->dev_caps.common_cap.valid_functions & 1843 ICE_CAPS_VALID_FUNCS_M); 1844 1845 if (!funcs) 1846 return 0; 1847 1848 return max / funcs; 1849 } 1850 1851 /** 1852 * ice_parse_common_caps - parse common device/function capabilities 1853 * @hw: pointer to the HW struct 1854 * @caps: pointer to common capabilities structure 1855 * @elem: the capability element to parse 1856 * @prefix: message prefix for tracing capabilities 1857 * 1858 * Given a capability element, extract relevant details into the common 1859 * capability structure. 1860 * 1861 * Returns: true if the capability matches one of the common capability ids, 1862 * false otherwise. 
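 *
 * A minimal caller sketch (mirroring how the parsers below use this
 * helper; not additional driver code):
 *
 *	found = ice_parse_common_caps(hw, &func_p->common_cap,
 *				      &cap_resp[i], "func caps");
 *	if (!found)
 *		; /* fall back to function- or device-specific parsing */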
1863 */ 1864 static bool 1865 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, 1866 struct ice_aqc_list_caps_elem *elem, const char *prefix) 1867 { 1868 u32 logical_id = le32_to_cpu(elem->logical_id); 1869 u32 phys_id = le32_to_cpu(elem->phys_id); 1870 u32 number = le32_to_cpu(elem->number); 1871 u16 cap = le16_to_cpu(elem->cap); 1872 bool found = true; 1873 1874 switch (cap) { 1875 case ICE_AQC_CAPS_VALID_FUNCTIONS: 1876 caps->valid_functions = number; 1877 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix, 1878 caps->valid_functions); 1879 break; 1880 case ICE_AQC_CAPS_SRIOV: 1881 caps->sr_iov_1_1 = (number == 1); 1882 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix, 1883 caps->sr_iov_1_1); 1884 break; 1885 case ICE_AQC_CAPS_DCB: 1886 caps->dcb = (number == 1); 1887 caps->active_tc_bitmap = logical_id; 1888 caps->maxtc = phys_id; 1889 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb); 1890 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix, 1891 caps->active_tc_bitmap); 1892 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc); 1893 break; 1894 case ICE_AQC_CAPS_RSS: 1895 caps->rss_table_size = number; 1896 caps->rss_table_entry_width = logical_id; 1897 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix, 1898 caps->rss_table_size); 1899 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix, 1900 caps->rss_table_entry_width); 1901 break; 1902 case ICE_AQC_CAPS_RXQS: 1903 caps->num_rxq = number; 1904 caps->rxq_first_id = phys_id; 1905 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix, 1906 caps->num_rxq); 1907 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix, 1908 caps->rxq_first_id); 1909 break; 1910 case ICE_AQC_CAPS_TXQS: 1911 caps->num_txq = number; 1912 caps->txq_first_id = phys_id; 1913 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix, 1914 caps->num_txq); 1915 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix, 1916 caps->txq_first_id); 1917 break; 1918 case ICE_AQC_CAPS_MSIX: 1919 caps->num_msix_vectors = number; 1920 caps->msix_vector_first_id = phys_id; 1921 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix, 1922 caps->num_msix_vectors); 1923 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix, 1924 caps->msix_vector_first_id); 1925 break; 1926 case ICE_AQC_CAPS_PENDING_NVM_VER: 1927 caps->nvm_update_pending_nvm = true; 1928 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); 1929 break; 1930 case ICE_AQC_CAPS_PENDING_OROM_VER: 1931 caps->nvm_update_pending_orom = true; 1932 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); 1933 break; 1934 case ICE_AQC_CAPS_PENDING_NET_VER: 1935 caps->nvm_update_pending_netlist = true; 1936 ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); 1937 break; 1938 case ICE_AQC_CAPS_NVM_MGMT: 1939 caps->nvm_unified_update = 1940 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? 
1941 true : false; 1942 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, 1943 caps->nvm_unified_update); 1944 break; 1945 case ICE_AQC_CAPS_RDMA: 1946 caps->rdma = (number == 1); 1947 ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); 1948 break; 1949 case ICE_AQC_CAPS_MAX_MTU: 1950 caps->max_mtu = number; 1951 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", 1952 prefix, caps->max_mtu); 1953 break; 1954 default: 1955 /* Not one of the recognized common capabilities */ 1956 found = false; 1957 } 1958 1959 return found; 1960 } 1961 1962 /** 1963 * ice_recalc_port_limited_caps - Recalculate port limited capabilities 1964 * @hw: pointer to the HW structure 1965 * @caps: pointer to capabilities structure to fix 1966 * 1967 * Re-calculate the capabilities that are dependent on the number of physical 1968 * ports; i.e. some features are not supported or function differently on 1969 * devices with more than 4 ports. 1970 */ 1971 static void 1972 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) 1973 { 1974 /* This assumes device capabilities are always scanned before function 1975 * capabilities during the initialization flow. 1976 */ 1977 if (hw->dev_caps.num_funcs > 4) { 1978 /* Max 4 TCs per port */ 1979 caps->maxtc = 4; 1980 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n", 1981 caps->maxtc); 1982 if (caps->rdma) { 1983 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n"); 1984 caps->rdma = 0; 1985 } 1986 1987 /* print message only when processing device capabilities 1988 * during initialization. 1989 */ 1990 if (caps == &hw->dev_caps.common_cap) 1991 dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n"); 1992 } 1993 } 1994 1995 /** 1996 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps 1997 * @hw: pointer to the HW struct 1998 * @func_p: pointer to function capabilities structure 1999 * @cap: pointer to the capability element to parse 2000 * 2001 * Extract function capabilities for ICE_AQC_CAPS_VF. 2002 */ 2003 static void 2004 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2005 struct ice_aqc_list_caps_elem *cap) 2006 { 2007 u32 logical_id = le32_to_cpu(cap->logical_id); 2008 u32 number = le32_to_cpu(cap->number); 2009 2010 func_p->num_allocd_vfs = number; 2011 func_p->vf_base_id = logical_id; 2012 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", 2013 func_p->num_allocd_vfs); 2014 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", 2015 func_p->vf_base_id); 2016 } 2017 2018 /** 2019 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps 2020 * @hw: pointer to the HW struct 2021 * @func_p: pointer to function capabilities structure 2022 * @cap: pointer to the capability element to parse 2023 * 2024 * Extract function capabilities for ICE_AQC_CAPS_VSI. 
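 *
 * The guaranteed VSI count is computed locally instead of taken from the
 * firmware-reported number: for example, assuming ICE_MAX_VSI is 768 and
 * eight PFs are valid, ice_get_num_per_func() yields 768 / 8 = 96
 * guaranteed VSIs per function.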
2025 */ 2026 static void 2027 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2028 struct ice_aqc_list_caps_elem *cap) 2029 { 2030 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); 2031 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", 2032 le32_to_cpu(cap->number)); 2033 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", 2034 func_p->guar_num_vsi); 2035 } 2036 2037 /** 2038 * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps 2039 * @hw: pointer to the HW struct 2040 * @func_p: pointer to function capabilities structure 2041 * 2042 * Extract function capabilities for ICE_AQC_CAPS_FD. 2043 */ 2044 static void 2045 ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) 2046 { 2047 u32 reg_val, val; 2048 2049 reg_val = rd32(hw, GLQF_FD_SIZE); 2050 val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> 2051 GLQF_FD_SIZE_FD_GSIZE_S; 2052 func_p->fd_fltr_guar = 2053 ice_get_num_per_func(hw, val); 2054 val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> 2055 GLQF_FD_SIZE_FD_BSIZE_S; 2056 func_p->fd_fltr_best_effort = val; 2057 2058 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n", 2059 func_p->fd_fltr_guar); 2060 ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_best_effort = %d\n", 2061 func_p->fd_fltr_best_effort); 2062 } 2063 2064 /** 2065 * ice_parse_func_caps - Parse function capabilities 2066 * @hw: pointer to the HW struct 2067 * @func_p: pointer to function capabilities structure 2068 * @buf: buffer containing the function capability records 2069 * @cap_count: the number of capabilities 2070 * 2071 * Helper function to parse function (0x000A) capabilities list. For 2072 * capabilities shared between device and function, this relies on 2073 * ice_parse_common_caps. 2074 * 2075 * Loop through the list of provided capabilities and extract the relevant 2076 * data into the function capabilities structure. 2077 */ 2078 static void 2079 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, 2080 void *buf, u32 cap_count) 2081 { 2082 struct ice_aqc_list_caps_elem *cap_resp; 2083 u32 i; 2084 2085 cap_resp = buf; 2086 2087 memset(func_p, 0, sizeof(*func_p)); 2088 2089 for (i = 0; i < cap_count; i++) { 2090 u16 cap = le16_to_cpu(cap_resp[i].cap); 2091 bool found; 2092 2093 found = ice_parse_common_caps(hw, &func_p->common_cap, 2094 &cap_resp[i], "func caps"); 2095 2096 switch (cap) { 2097 case ICE_AQC_CAPS_VF: 2098 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); 2099 break; 2100 case ICE_AQC_CAPS_VSI: 2101 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); 2102 break; 2103 case ICE_AQC_CAPS_FD: 2104 ice_parse_fdir_func_caps(hw, func_p); 2105 break; 2106 default: 2107 /* Don't list common capabilities as unknown */ 2108 if (!found) 2109 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n", 2110 i, cap); 2111 break; 2112 } 2113 } 2114 2115 ice_recalc_port_limited_caps(hw, &func_p->common_cap); 2116 } 2117 2118 /** 2119 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps 2120 * @hw: pointer to the HW struct 2121 * @dev_p: pointer to device capabilities structure 2122 * @cap: capability element to parse 2123 * 2124 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
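 *
 * Note that cap->number is a bitmap of enabled functions rather than a
 * count: a reported value of 0x3 (functions 0 and 1 enabled), for
 * example, results in num_funcs = hweight32(0x3) = 2.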
2125 */ 2126 static void 2127 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2128 struct ice_aqc_list_caps_elem *cap) 2129 { 2130 u32 number = le32_to_cpu(cap->number); 2131 2132 dev_p->num_funcs = hweight32(number); 2133 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", 2134 dev_p->num_funcs); 2135 } 2136 2137 /** 2138 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps 2139 * @hw: pointer to the HW struct 2140 * @dev_p: pointer to device capabilities structure 2141 * @cap: capability element to parse 2142 * 2143 * Parse ICE_AQC_CAPS_VF for device capabilities. 2144 */ 2145 static void 2146 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2147 struct ice_aqc_list_caps_elem *cap) 2148 { 2149 u32 number = le32_to_cpu(cap->number); 2150 2151 dev_p->num_vfs_exposed = number; 2152 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vfs_exposed = %d\n", 2153 dev_p->num_vfs_exposed); 2154 } 2155 2156 /** 2157 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps 2158 * @hw: pointer to the HW struct 2159 * @dev_p: pointer to device capabilities structure 2160 * @cap: capability element to parse 2161 * 2162 * Parse ICE_AQC_CAPS_VSI for device capabilities. 2163 */ 2164 static void 2165 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2166 struct ice_aqc_list_caps_elem *cap) 2167 { 2168 u32 number = le32_to_cpu(cap->number); 2169 2170 dev_p->num_vsi_allocd_to_host = number; 2171 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", 2172 dev_p->num_vsi_allocd_to_host); 2173 } 2174 2175 /** 2176 * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps 2177 * @hw: pointer to the HW struct 2178 * @dev_p: pointer to device capabilities structure 2179 * @cap: capability element to parse 2180 * 2181 * Parse ICE_AQC_CAPS_FD for device capabilities. 2182 */ 2183 static void 2184 ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2185 struct ice_aqc_list_caps_elem *cap) 2186 { 2187 u32 number = le32_to_cpu(cap->number); 2188 2189 dev_p->num_flow_director_fltr = number; 2190 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", 2191 dev_p->num_flow_director_fltr); 2192 } 2193 2194 /** 2195 * ice_parse_dev_caps - Parse device capabilities 2196 * @hw: pointer to the HW struct 2197 * @dev_p: pointer to device capabilities structure 2198 * @buf: buffer containing the device capability records 2199 * @cap_count: the number of capabilities 2200 * 2201 * Helper function to parse device (0x000B) capabilities list. For 2202 * capabilities shared between device and function, this relies on 2203 * ice_parse_common_caps. 2204 * 2205 * Loop through the list of provided capabilities and extract the relevant 2206 * data into the device capabilities structure.
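 *
 * Note that ice_recalc_port_limited_caps() depends on the num_funcs
 * value extracted here, which is why device capabilities are expected to
 * be parsed before function capabilities (see the assumption noted in
 * ice_recalc_port_limited_caps()).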
2207 */ 2208 static void 2209 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, 2210 void *buf, u32 cap_count) 2211 { 2212 struct ice_aqc_list_caps_elem *cap_resp; 2213 u32 i; 2214 2215 cap_resp = buf; 2216 2217 memset(dev_p, 0, sizeof(*dev_p)); 2218 2219 for (i = 0; i < cap_count; i++) { 2220 u16 cap = le16_to_cpu(cap_resp[i].cap); 2221 bool found; 2222 2223 found = ice_parse_common_caps(hw, &dev_p->common_cap, 2224 &cap_resp[i], "dev caps"); 2225 2226 switch (cap) { 2227 case ICE_AQC_CAPS_VALID_FUNCTIONS: 2228 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); 2229 break; 2230 case ICE_AQC_CAPS_VF: 2231 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); 2232 break; 2233 case ICE_AQC_CAPS_VSI: 2234 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); 2235 break; 2236 case ICE_AQC_CAPS_FD: 2237 ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); 2238 break; 2239 default: 2240 /* Don't list common capabilities as unknown */ 2241 if (!found) 2242 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n", 2243 i, cap); 2244 break; 2245 } 2246 } 2247 2248 ice_recalc_port_limited_caps(hw, &dev_p->common_cap); 2249 } 2250 2251 /** 2252 * ice_aq_list_caps - query function/device capabilities 2253 * @hw: pointer to the HW struct 2254 * @buf: a buffer to hold the capabilities 2255 * @buf_size: size of the buffer 2256 * @cap_count: if not NULL, set to the number of capabilities reported 2257 * @opc: capabilities type to discover, device or function 2258 * @cd: pointer to command details structure or NULL 2259 * 2260 * Get the function (0x000A) or device (0x000B) capabilities description from 2261 * firmware and store it in the buffer. 2262 * 2263 * If the cap_count pointer is not NULL, then it is set to the number of 2264 * capabilities firmware will report. Note that if the buffer size is too 2265 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The 2266 * cap_count will still be updated in this case. It is recommended that the 2267 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that 2268 * firmware could return) to avoid this. 2269 */ 2270 enum ice_status 2271 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, 2272 enum ice_adminq_opc opc, struct ice_sq_cd *cd) 2273 { 2274 struct ice_aqc_list_caps *cmd; 2275 struct ice_aq_desc desc; 2276 enum ice_status status; 2277 2278 cmd = &desc.params.get_cap; 2279 2280 if (opc != ice_aqc_opc_list_func_caps && 2281 opc != ice_aqc_opc_list_dev_caps) 2282 return ICE_ERR_PARAM; 2283 2284 ice_fill_dflt_direct_cmd_desc(&desc, opc); 2285 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 2286 2287 if (cap_count) 2288 *cap_count = le32_to_cpu(cmd->count); 2289 2290 return status; 2291 } 2292 2293 /** 2294 * ice_discover_dev_caps - Read and extract device capabilities 2295 * @hw: pointer to the hardware structure 2296 * @dev_caps: pointer to device capabilities structure 2297 * 2298 * Read the device capabilities and extract them into the dev_caps structure 2299 * for later use. 2300 */ 2301 enum ice_status 2302 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) 2303 { 2304 enum ice_status status; 2305 u32 cap_count = 0; 2306 void *cbuf; 2307 2308 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2309 if (!cbuf) 2310 return ICE_ERR_NO_MEMORY; 2311 2312 /* Although the driver doesn't know the number of capabilities the 2313 * device will return, we can simply send a 4KB buffer, the maximum 2314 * possible size that firmware can return. 
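 * As a worked example of the sizing below: assuming ICE_AQ_MAX_BUF_LEN
 * is 4096 and struct ice_aqc_list_caps_elem is 32 bytes in its current
 * layout, the initial cap_count is 4096 / 32 = 128 entries; on success
 * ice_aq_list_caps() overwrites cap_count with the number of
 * capabilities firmware actually reported.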
2315 */ 2316 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2317 2318 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2319 ice_aqc_opc_list_dev_caps, NULL); 2320 if (!status) 2321 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); 2322 kfree(cbuf); 2323 2324 return status; 2325 } 2326 2327 /** 2328 * ice_discover_func_caps - Read and extract function capabilities 2329 * @hw: pointer to the hardware structure 2330 * @func_caps: pointer to function capabilities structure 2331 * 2332 * Read the function capabilities and extract them into the func_caps structure 2333 * for later use. 2334 */ 2335 static enum ice_status 2336 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) 2337 { 2338 enum ice_status status; 2339 u32 cap_count = 0; 2340 void *cbuf; 2341 2342 cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); 2343 if (!cbuf) 2344 return ICE_ERR_NO_MEMORY; 2345 2346 /* Although the driver doesn't know the number of capabilities the 2347 * device will return, we can simply send a 4KB buffer, the maximum 2348 * possible size that firmware can return. 2349 */ 2350 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); 2351 2352 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, 2353 ice_aqc_opc_list_func_caps, NULL); 2354 if (!status) 2355 ice_parse_func_caps(hw, func_caps, cbuf, cap_count); 2356 kfree(cbuf); 2357 2358 return status; 2359 } 2360 2361 /** 2362 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode 2363 * @hw: pointer to the hardware structure 2364 */ 2365 void ice_set_safe_mode_caps(struct ice_hw *hw) 2366 { 2367 struct ice_hw_func_caps *func_caps = &hw->func_caps; 2368 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps; 2369 struct ice_hw_common_caps cached_caps; 2370 u32 num_funcs; 2371 2372 /* cache some func_caps values that should be restored after memset */ 2373 cached_caps = func_caps->common_cap; 2374 2375 /* unset func capabilities */ 2376 memset(func_caps, 0, sizeof(*func_caps)); 2377 2378 #define ICE_RESTORE_FUNC_CAP(name) \ 2379 func_caps->common_cap.name = cached_caps.name 2380 2381 /* restore cached values */ 2382 ICE_RESTORE_FUNC_CAP(valid_functions); 2383 ICE_RESTORE_FUNC_CAP(txq_first_id); 2384 ICE_RESTORE_FUNC_CAP(rxq_first_id); 2385 ICE_RESTORE_FUNC_CAP(msix_vector_first_id); 2386 ICE_RESTORE_FUNC_CAP(max_mtu); 2387 ICE_RESTORE_FUNC_CAP(nvm_unified_update); 2388 ICE_RESTORE_FUNC_CAP(nvm_update_pending_nvm); 2389 ICE_RESTORE_FUNC_CAP(nvm_update_pending_orom); 2390 ICE_RESTORE_FUNC_CAP(nvm_update_pending_netlist); 2391 2392 /* one Tx and one Rx queue in safe mode */ 2393 func_caps->common_cap.num_rxq = 1; 2394 func_caps->common_cap.num_txq = 1; 2395 2396 /* two MSIX vectors, one for traffic and one for misc causes */ 2397 func_caps->common_cap.num_msix_vectors = 2; 2398 func_caps->guar_num_vsi = 1; 2399 2400 /* cache some dev_caps values that should be restored after memset */ 2401 cached_caps = dev_caps->common_cap; 2402 num_funcs = dev_caps->num_funcs; 2403 2404 /* unset dev capabilities */ 2405 memset(dev_caps, 0, sizeof(*dev_caps)); 2406 2407 #define ICE_RESTORE_DEV_CAP(name) \ 2408 dev_caps->common_cap.name = cached_caps.name 2409 2410 /* restore cached values */ 2411 ICE_RESTORE_DEV_CAP(valid_functions); 2412 ICE_RESTORE_DEV_CAP(txq_first_id); 2413 ICE_RESTORE_DEV_CAP(rxq_first_id); 2414 ICE_RESTORE_DEV_CAP(msix_vector_first_id); 2415 ICE_RESTORE_DEV_CAP(max_mtu); 2416 ICE_RESTORE_DEV_CAP(nvm_unified_update); 2417 
ICE_RESTORE_DEV_CAP(nvm_update_pending_nvm); 2418 ICE_RESTORE_DEV_CAP(nvm_update_pending_orom); 2419 ICE_RESTORE_DEV_CAP(nvm_update_pending_netlist); 2420 dev_caps->num_funcs = num_funcs; 2421 2422 /* one Tx and one Rx queue per function in safe mode */ 2423 dev_caps->common_cap.num_rxq = num_funcs; 2424 dev_caps->common_cap.num_txq = num_funcs; 2425 2426 /* two MSIX vectors per function */ 2427 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs; 2428 } 2429 2430 /** 2431 * ice_get_caps - get info about the HW 2432 * @hw: pointer to the hardware structure 2433 */ 2434 enum ice_status ice_get_caps(struct ice_hw *hw) 2435 { 2436 enum ice_status status; 2437 2438 status = ice_discover_dev_caps(hw, &hw->dev_caps); 2439 if (status) 2440 return status; 2441 2442 return ice_discover_func_caps(hw, &hw->func_caps); 2443 } 2444 2445 /** 2446 * ice_aq_manage_mac_write - manage MAC address write command 2447 * @hw: pointer to the HW struct 2448 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address 2449 * @flags: flags to control write behavior 2450 * @cd: pointer to command details structure or NULL 2451 * 2452 * This function is used to write MAC address to the NVM (0x0108). 2453 */ 2454 enum ice_status 2455 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, 2456 struct ice_sq_cd *cd) 2457 { 2458 struct ice_aqc_manage_mac_write *cmd; 2459 struct ice_aq_desc desc; 2460 2461 cmd = &desc.params.mac_write; 2462 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write); 2463 2464 cmd->flags = flags; 2465 ether_addr_copy(cmd->mac_addr, mac_addr); 2466 2467 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 2468 } 2469 2470 /** 2471 * ice_aq_clear_pxe_mode 2472 * @hw: pointer to the HW struct 2473 * 2474 * Tell the firmware that the driver is taking over from PXE (0x0110). 2475 */ 2476 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw) 2477 { 2478 struct ice_aq_desc desc; 2479 2480 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode); 2481 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT; 2482 2483 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); 2484 } 2485 2486 /** 2487 * ice_clear_pxe_mode - clear pxe operations mode 2488 * @hw: pointer to the HW struct 2489 * 2490 * Make sure all PXE mode settings are cleared, including things 2491 * like descriptor fetch/write-back mode. 2492 */ 2493 void ice_clear_pxe_mode(struct ice_hw *hw) 2494 { 2495 if (ice_check_sq_alive(hw, &hw->adminq)) 2496 ice_aq_clear_pxe_mode(hw); 2497 } 2498 2499 /** 2500 * ice_get_link_speed_based_on_phy_type - returns link speed 2501 * @phy_type_low: lower part of phy_type 2502 * @phy_type_high: higher part of phy_type 2503 * 2504 * This helper function will convert an entry in PHY type structure 2505 * [phy_type_low, phy_type_high] to its corresponding link speed. 2506 * Note: In the structure of [phy_type_low, phy_type_high], there should 2507 * be one bit set, as this function will convert one PHY type to its 2508 * speed. 
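 * For example, a call with phy_type_low == ICE_PHY_TYPE_LOW_25GBASE_SR
 * (a single bit) and phy_type_high == 0 returns ICE_AQ_LINK_SPEED_25GB.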
2509 * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2510 * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned 2511 */ 2512 static u16 2513 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) 2514 { 2515 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2516 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2517 2518 switch (phy_type_low) { 2519 case ICE_PHY_TYPE_LOW_100BASE_TX: 2520 case ICE_PHY_TYPE_LOW_100M_SGMII: 2521 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; 2522 break; 2523 case ICE_PHY_TYPE_LOW_1000BASE_T: 2524 case ICE_PHY_TYPE_LOW_1000BASE_SX: 2525 case ICE_PHY_TYPE_LOW_1000BASE_LX: 2526 case ICE_PHY_TYPE_LOW_1000BASE_KX: 2527 case ICE_PHY_TYPE_LOW_1G_SGMII: 2528 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; 2529 break; 2530 case ICE_PHY_TYPE_LOW_2500BASE_T: 2531 case ICE_PHY_TYPE_LOW_2500BASE_X: 2532 case ICE_PHY_TYPE_LOW_2500BASE_KX: 2533 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; 2534 break; 2535 case ICE_PHY_TYPE_LOW_5GBASE_T: 2536 case ICE_PHY_TYPE_LOW_5GBASE_KR: 2537 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; 2538 break; 2539 case ICE_PHY_TYPE_LOW_10GBASE_T: 2540 case ICE_PHY_TYPE_LOW_10G_SFI_DA: 2541 case ICE_PHY_TYPE_LOW_10GBASE_SR: 2542 case ICE_PHY_TYPE_LOW_10GBASE_LR: 2543 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: 2544 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: 2545 case ICE_PHY_TYPE_LOW_10G_SFI_C2C: 2546 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; 2547 break; 2548 case ICE_PHY_TYPE_LOW_25GBASE_T: 2549 case ICE_PHY_TYPE_LOW_25GBASE_CR: 2550 case ICE_PHY_TYPE_LOW_25GBASE_CR_S: 2551 case ICE_PHY_TYPE_LOW_25GBASE_CR1: 2552 case ICE_PHY_TYPE_LOW_25GBASE_SR: 2553 case ICE_PHY_TYPE_LOW_25GBASE_LR: 2554 case ICE_PHY_TYPE_LOW_25GBASE_KR: 2555 case ICE_PHY_TYPE_LOW_25GBASE_KR_S: 2556 case ICE_PHY_TYPE_LOW_25GBASE_KR1: 2557 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: 2558 case ICE_PHY_TYPE_LOW_25G_AUI_C2C: 2559 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; 2560 break; 2561 case ICE_PHY_TYPE_LOW_40GBASE_CR4: 2562 case ICE_PHY_TYPE_LOW_40GBASE_SR4: 2563 case ICE_PHY_TYPE_LOW_40GBASE_LR4: 2564 case ICE_PHY_TYPE_LOW_40GBASE_KR4: 2565 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: 2566 case ICE_PHY_TYPE_LOW_40G_XLAUI: 2567 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; 2568 break; 2569 case ICE_PHY_TYPE_LOW_50GBASE_CR2: 2570 case ICE_PHY_TYPE_LOW_50GBASE_SR2: 2571 case ICE_PHY_TYPE_LOW_50GBASE_LR2: 2572 case ICE_PHY_TYPE_LOW_50GBASE_KR2: 2573 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: 2574 case ICE_PHY_TYPE_LOW_50G_LAUI2: 2575 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: 2576 case ICE_PHY_TYPE_LOW_50G_AUI2: 2577 case ICE_PHY_TYPE_LOW_50GBASE_CP: 2578 case ICE_PHY_TYPE_LOW_50GBASE_SR: 2579 case ICE_PHY_TYPE_LOW_50GBASE_FR: 2580 case ICE_PHY_TYPE_LOW_50GBASE_LR: 2581 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: 2582 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: 2583 case ICE_PHY_TYPE_LOW_50G_AUI1: 2584 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB; 2585 break; 2586 case ICE_PHY_TYPE_LOW_100GBASE_CR4: 2587 case ICE_PHY_TYPE_LOW_100GBASE_SR4: 2588 case ICE_PHY_TYPE_LOW_100GBASE_LR4: 2589 case ICE_PHY_TYPE_LOW_100GBASE_KR4: 2590 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: 2591 case ICE_PHY_TYPE_LOW_100G_CAUI4: 2592 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: 2593 case ICE_PHY_TYPE_LOW_100G_AUI4: 2594 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: 2595 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4: 2596 case ICE_PHY_TYPE_LOW_100GBASE_CP2: 2597 case ICE_PHY_TYPE_LOW_100GBASE_SR2: 2598 case ICE_PHY_TYPE_LOW_100GBASE_DR: 2599 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB; 2600 break; 
2601 default: 2602 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; 2603 break; 2604 } 2605 2606 switch (phy_type_high) { 2607 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: 2608 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: 2609 case ICE_PHY_TYPE_HIGH_100G_CAUI2: 2610 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: 2611 case ICE_PHY_TYPE_HIGH_100G_AUI2: 2612 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; 2613 break; 2614 default: 2615 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; 2616 break; 2617 } 2618 2619 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN && 2620 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 2621 return ICE_AQ_LINK_SPEED_UNKNOWN; 2622 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 2623 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN) 2624 return ICE_AQ_LINK_SPEED_UNKNOWN; 2625 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN && 2626 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN) 2627 return speed_phy_type_low; 2628 else 2629 return speed_phy_type_high; 2630 } 2631 2632 /** 2633 * ice_update_phy_type 2634 * @phy_type_low: pointer to the lower part of phy_type 2635 * @phy_type_high: pointer to the higher part of phy_type 2636 * @link_speeds_bitmap: targeted link speeds bitmap 2637 * 2638 * Note: the format of link_speeds_bitmap matches the link_speed field 2639 * of struct ice_aqc_get_link_status; the caller may pass in a 2640 * link_speeds_bitmap that includes multiple speeds. 2641 * 2642 * Each bit in the [phy_type_low, phy_type_high] structure represents a 2643 * certain link speed. This helper function turns on the bits in 2644 * [phy_type_low, phy_type_high] that correspond to the speeds set in 2645 * the link_speeds_bitmap input parameter. 2646 */ 2647 void 2648 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, 2649 u16 link_speeds_bitmap) 2650 { 2651 u64 pt_high; 2652 u64 pt_low; 2653 int index; 2654 u16 speed; 2655 2656 /* We first check with low part of phy_type */ 2657 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { 2658 pt_low = BIT_ULL(index); 2659 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0); 2660 2661 if (link_speeds_bitmap & speed) 2662 *phy_type_low |= BIT_ULL(index); 2663 } 2664 2665 /* We then check with high part of phy_type */ 2666 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) { 2667 pt_high = BIT_ULL(index); 2668 speed = ice_get_link_speed_based_on_phy_type(0, pt_high); 2669 2670 if (link_speeds_bitmap & speed) 2671 *phy_type_high |= BIT_ULL(index); 2672 } 2673 } 2674 2675 /** 2676 * ice_aq_set_phy_cfg 2677 * @hw: pointer to the HW struct 2678 * @pi: port info structure of the interested logical port 2679 * @cfg: structure with PHY configuration data to be set 2680 * @cd: pointer to command details structure or NULL 2681 * 2682 * Set the various PHY configuration parameters supported on the Port. 2683 * One or more of the Set PHY config parameters may be ignored in an MFP 2684 * mode as the PF may not have the privilege to set some of the PHY Config 2685 * parameters. This status will be indicated by the command response (0x0601). 2686 */ 2687 enum ice_status 2688 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, 2689 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) 2690 { 2691 struct ice_aq_desc desc; 2692 enum ice_status status; 2693 2694 if (!cfg) 2695 return ICE_ERR_PARAM; 2696 2697 /* Ensure that only valid bits of cfg->caps can be turned on.
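 * Undefined bits are logged and masked off rather than treated as an
 * error, so only capability bits that the Set PHY Config command
 * (0x0601) actually defines are ever sent to firmware.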
*/ 2698 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) { 2699 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n", 2700 cfg->caps); 2701 2702 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK; 2703 } 2704 2705 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); 2706 desc.params.set_phy.lport_num = pi->lport; 2707 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 2708 2709 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); 2710 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", 2711 (unsigned long long)le64_to_cpu(cfg->phy_type_low)); 2712 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", 2713 (unsigned long long)le64_to_cpu(cfg->phy_type_high)); 2714 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); 2715 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", 2716 cfg->low_power_ctrl_an); 2717 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); 2718 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); 2719 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", 2720 cfg->link_fec_opt); 2721 2722 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); 2723 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) 2724 status = 0; 2725 2726 if (!status) 2727 pi->phy.curr_user_phy_cfg = *cfg; 2728 2729 return status; 2730 } 2731 2732 /** 2733 * ice_update_link_info - update status of the HW network link 2734 * @pi: port info structure of the interested logical port 2735 */ 2736 enum ice_status ice_update_link_info(struct ice_port_info *pi) 2737 { 2738 struct ice_link_status *li; 2739 enum ice_status status; 2740 2741 if (!pi) 2742 return ICE_ERR_PARAM; 2743 2744 li = &pi->phy.link_info; 2745 2746 status = ice_aq_get_link_info(pi, true, NULL, NULL); 2747 if (status) 2748 return status; 2749 2750 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { 2751 struct ice_aqc_get_phy_caps_data *pcaps; 2752 struct ice_hw *hw; 2753 2754 hw = pi->hw; 2755 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), 2756 GFP_KERNEL); 2757 if (!pcaps) 2758 return ICE_ERR_NO_MEMORY; 2759 2760 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, 2761 pcaps, NULL); 2762 2763 devm_kfree(ice_hw_to_dev(hw), pcaps); 2764 } 2765 2766 return status; 2767 } 2768 2769 /** 2770 * ice_cache_phy_user_req 2771 * @pi: port information structure 2772 * @cache_data: PHY logging data 2773 * @cache_mode: PHY logging mode 2774 * 2775 * Log the user request on (FC, FEC, SPEED) for later use. 
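 *
 * Caller sketch (as done in ice_cfg_phy_fc() below for flow control):
 *
 *	cache_data.data.curr_user_fc_req = req_mode;
 *	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);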
2776 */ 2777 static void 2778 ice_cache_phy_user_req(struct ice_port_info *pi, 2779 struct ice_phy_cache_mode_data cache_data, 2780 enum ice_phy_cache_mode cache_mode) 2781 { 2782 if (!pi) 2783 return; 2784 2785 switch (cache_mode) { 2786 case ICE_FC_MODE: 2787 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; 2788 break; 2789 case ICE_SPEED_MODE: 2790 pi->phy.curr_user_speed_req = 2791 cache_data.data.curr_user_speed_req; 2792 break; 2793 case ICE_FEC_MODE: 2794 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; 2795 break; 2796 default: 2797 break; 2798 } 2799 } 2800 2801 /** 2802 * ice_caps_to_fc_mode 2803 * @caps: PHY capabilities 2804 * 2805 * Convert PHY FC capabilities to ice FC mode 2806 */ 2807 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) 2808 { 2809 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && 2810 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2811 return ICE_FC_FULL; 2812 2813 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) 2814 return ICE_FC_TX_PAUSE; 2815 2816 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) 2817 return ICE_FC_RX_PAUSE; 2818 2819 return ICE_FC_NONE; 2820 } 2821 2822 /** 2823 * ice_caps_to_fec_mode 2824 * @caps: PHY capabilities 2825 * @fec_options: Link FEC options 2826 * 2827 * Convert PHY FEC capabilities to ice FEC mode 2828 */ 2829 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) 2830 { 2831 if (caps & ICE_AQC_PHY_EN_AUTO_FEC) 2832 return ICE_FEC_AUTO; 2833 2834 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 2835 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 2836 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | 2837 ICE_AQC_PHY_FEC_25G_KR_REQ)) 2838 return ICE_FEC_BASER; 2839 2840 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | 2841 ICE_AQC_PHY_FEC_25G_RS_544_REQ | 2842 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) 2843 return ICE_FEC_RS; 2844 2845 return ICE_FEC_NONE; 2846 } 2847 2848 /** 2849 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode 2850 * @pi: port information structure 2851 * @cfg: PHY configuration data to set FC mode 2852 * @req_mode: FC mode to configure 2853 */ 2854 enum ice_status 2855 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 2856 enum ice_fc_mode req_mode) 2857 { 2858 struct ice_phy_cache_mode_data cache_data; 2859 u8 pause_mask = 0x0; 2860 2861 if (!pi || !cfg) 2862 return ICE_ERR_BAD_PTR; 2863 2864 switch (req_mode) { 2865 case ICE_FC_FULL: 2866 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2867 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2868 break; 2869 case ICE_FC_RX_PAUSE: 2870 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; 2871 break; 2872 case ICE_FC_TX_PAUSE: 2873 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; 2874 break; 2875 default: 2876 break; 2877 } 2878 2879 /* clear the old pause settings */ 2880 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | 2881 ICE_AQC_PHY_EN_RX_LINK_PAUSE); 2882 2883 /* set the new capabilities */ 2884 cfg->caps |= pause_mask; 2885 2886 /* Cache user FC request */ 2887 cache_data.data.curr_user_fc_req = req_mode; 2888 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); 2889 2890 return 0; 2891 } 2892 2893 /** 2894 * ice_set_fc 2895 * @pi: port information structure 2896 * @aq_failures: pointer to status code, specific to ice_set_fc routine 2897 * @ena_auto_link_update: enable automatic link update 2898 * 2899 * Set the requested flow control mode. 
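 *
 * An illustrative caller sketch (pi->fc.req_mode must be set first, as
 * this routine configures the PHY from that field):
 *
 *	u8 aq_failures;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	if (ice_set_fc(pi, &aq_failures, true))
 *		; /* inspect aq_failures: ICE_SET_FC_AQ_FAIL_GET/SET/UPDATE */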
2900 */ 2901 enum ice_status 2902 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) 2903 { 2904 struct ice_aqc_set_phy_cfg_data cfg = { 0 }; 2905 struct ice_aqc_get_phy_caps_data *pcaps; 2906 enum ice_status status; 2907 struct ice_hw *hw; 2908 2909 if (!pi || !aq_failures) 2910 return ICE_ERR_BAD_PTR; 2911 2912 *aq_failures = 0; 2913 hw = pi->hw; 2914 2915 pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); 2916 if (!pcaps) 2917 return ICE_ERR_NO_MEMORY; 2918 2919 /* Get the current PHY config */ 2920 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG, 2921 pcaps, NULL); 2922 if (status) { 2923 *aq_failures = ICE_SET_FC_AQ_FAIL_GET; 2924 goto out; 2925 } 2926 2927 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); 2928 2929 /* Configure the set PHY data */ 2930 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); 2931 if (status) 2932 goto out; 2933 2934 /* If the capabilities have changed, then set the new config */ 2935 if (cfg.caps != pcaps->caps) { 2936 int retry_count, retry_max = 10; 2937 2938 /* Auto restart link so settings take effect */ 2939 if (ena_auto_link_update) 2940 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2941 2942 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); 2943 if (status) { 2944 *aq_failures = ICE_SET_FC_AQ_FAIL_SET; 2945 goto out; 2946 } 2947 2948 /* Update the link info. 2949 * It sometimes takes a really long time for the link to 2950 * come back from the atomic reset. Thus, we wait a 2951 * little bit. 2952 */ 2953 for (retry_count = 0; retry_count < retry_max; retry_count++) { 2954 status = ice_update_link_info(pi); 2955 2956 if (!status) 2957 break; 2958 2959 mdelay(100); 2960 } 2961 2962 if (status) 2963 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE; 2964 } 2965 2966 out: 2967 devm_kfree(ice_hw_to_dev(hw), pcaps); 2968 return status; 2969 } 2970 2971 /** 2972 * ice_phy_caps_equals_cfg 2973 * @phy_caps: PHY capabilities 2974 * @phy_cfg: PHY configuration 2975 * 2976 * Helper function to determine if PHY capabilities match PHY 2977 * configuration 2978 */ 2979 bool 2980 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, 2981 struct ice_aqc_set_phy_cfg_data *phy_cfg) 2982 { 2983 u8 caps_mask, cfg_mask; 2984 2985 if (!phy_caps || !phy_cfg) 2986 return false; 2987 2988 /* These bits are not common between capabilities and configuration. 2989 * Do not use them to determine equality.
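 * (ICE_AQC_PHY_AN_MODE and ICE_AQC_GET_PHY_EN_MOD_QUAL exist only on
 * the capabilities side, while ICE_AQ_PHY_ENA_AUTO_LINK_UPDT exists
 * only on the configuration side, so each is masked out of its
 * respective field before the comparison below.)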
2990 */ 2991 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | 2992 ICE_AQC_GET_PHY_EN_MOD_QUAL); 2993 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; 2994 2995 if (phy_caps->phy_type_low != phy_cfg->phy_type_low || 2996 phy_caps->phy_type_high != phy_cfg->phy_type_high || 2997 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || 2998 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || 2999 phy_caps->eee_cap != phy_cfg->eee_cap || 3000 phy_caps->eeer_value != phy_cfg->eeer_value || 3001 phy_caps->link_fec_options != phy_cfg->link_fec_opt) 3002 return false; 3003 3004 return true; 3005 } 3006 3007 /** 3008 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data 3009 * @pi: port information structure 3010 * @caps: PHY ability structure to copy data from 3011 * @cfg: PHY configuration structure to copy data to 3012 * 3013 * Helper function to copy AQC PHY get ability data to PHY set configuration 3014 * data structure 3015 */ 3016 void 3017 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, 3018 struct ice_aqc_get_phy_caps_data *caps, 3019 struct ice_aqc_set_phy_cfg_data *cfg) 3020 { 3021 if (!pi || !caps || !cfg) 3022 return; 3023 3024 memset(cfg, 0, sizeof(*cfg)); 3025 cfg->phy_type_low = caps->phy_type_low; 3026 cfg->phy_type_high = caps->phy_type_high; 3027 cfg->caps = caps->caps; 3028 cfg->low_power_ctrl_an = caps->low_power_ctrl_an; 3029 cfg->eee_cap = caps->eee_cap; 3030 cfg->eeer_value = caps->eeer_value; 3031 cfg->link_fec_opt = caps->link_fec_options; 3032 cfg->module_compliance_enforcement = 3033 caps->module_compliance_enforcement; 3034 } 3035 3036 /** 3037 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode 3038 * @pi: port information structure 3039 * @cfg: PHY configuration data to set FEC mode 3040 * @fec: FEC mode to configure 3041 */ 3042 enum ice_status 3043 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, 3044 enum ice_fec_mode fec) 3045 { 3046 struct ice_aqc_get_phy_caps_data *pcaps; 3047 enum ice_status status; 3048 struct ice_hw *hw; 3049 3050 if (!pi || !cfg) 3051 return ICE_ERR_BAD_PTR; 3052 3053 hw = pi->hw; 3054 3055 pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); 3056 if (!pcaps) 3057 return ICE_ERR_NO_MEMORY; 3058 3059 status = ice_aq_get_phy_caps(pi, false, 3060 (ice_fw_supports_report_dflt_cfg(hw) ? 3061 ICE_AQC_REPORT_DFLT_CFG : 3062 ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL); 3063 if (status) 3064 goto out; 3065 3066 cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; 3067 cfg->link_fec_opt = pcaps->link_fec_options; 3068 3069 switch (fec) { 3070 case ICE_FEC_BASER: 3071 /* Clear the RS bits, AND in the BASE-R ability 3072 * bits, and OR in the request bits. 3073 */ 3074 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | 3075 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; 3076 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | 3077 ICE_AQC_PHY_FEC_25G_KR_REQ; 3078 break; 3079 case ICE_FEC_RS: 3080 /* Clear the BASE-R bits, AND in the RS ability 3081 * bits, and OR in the request bits. 3082 */ 3083 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; 3084 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | 3085 ICE_AQC_PHY_FEC_25G_RS_544_REQ; 3086 break; 3087 case ICE_FEC_NONE: 3088 /* Clear all FEC option bits. */ 3089 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK; 3090 break; 3091 case ICE_FEC_AUTO: 3092 /* AND auto FEC bit, and all caps bits.
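 * In other words, automatic FEC simply mirrors what the PHY reported:
 * caps are clipped to the valid capability mask and every FEC option
 * the PHY supports is requested.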
*/ 3093 cfg->caps &= ICE_AQC_PHY_CAPS_MASK; 3094 cfg->link_fec_opt |= pcaps->link_fec_options; 3095 break; 3096 default: 3097 status = ICE_ERR_PARAM; 3098 break; 3099 } 3100 3101 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) && 3102 !ice_fw_supports_report_dflt_cfg(hw)) { 3103 struct ice_link_default_override_tlv tlv; 3104 3105 if (ice_get_link_default_override(&tlv, pi)) 3106 goto out; 3107 3108 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && 3109 (tlv.options & ICE_LINK_OVERRIDE_EN)) 3110 cfg->link_fec_opt = tlv.fec_options; 3111 } 3112 3113 out: 3114 kfree(pcaps); 3115 3116 return status; 3117 } 3118 3119 /** 3120 * ice_get_link_status - get status of the HW network link 3121 * @pi: port information structure 3122 * @link_up: pointer to bool (true/false = linkup/linkdown) 3123 * 3124 * The variable link_up is set to true if the link is up and false if it 3125 * is down; its value is only valid when the return status is zero. As a 3126 * result of this call, link status reporting becomes enabled. 3127 */ 3128 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up) 3129 { 3130 struct ice_phy_info *phy_info; 3131 enum ice_status status = 0; 3132 3133 if (!pi || !link_up) 3134 return ICE_ERR_PARAM; 3135 3136 phy_info = &pi->phy; 3137 3138 if (phy_info->get_link_info) { 3139 status = ice_update_link_info(pi); 3140 3141 if (status) 3142 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n", 3143 status); 3144 } 3145 3146 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP; 3147 3148 return status; 3149 } 3150 3151 /** 3152 * ice_aq_set_link_restart_an 3153 * @pi: pointer to the port information structure 3154 * @ena_link: if true: enable link, if false: disable link 3155 * @cd: pointer to command details structure or NULL 3156 * 3157 * Sets up the link and restarts the Auto-Negotiation over the link.
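 *
 * Illustrative caller sketch (bring the link up and restart AN):
 *
 *	status = ice_aq_set_link_restart_an(pi, true, NULL);
 *	if (status)
 *		; /* restart failed; check hw->adminq.sq_last_status */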
3158 */ 3159 enum ice_status 3160 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, 3161 struct ice_sq_cd *cd) 3162 { 3163 struct ice_aqc_restart_an *cmd; 3164 struct ice_aq_desc desc; 3165 3166 cmd = &desc.params.restart_an; 3167 3168 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an); 3169 3170 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART; 3171 cmd->lport_num = pi->lport; 3172 if (ena_link) 3173 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE; 3174 else 3175 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE; 3176 3177 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); 3178 } 3179 3180 /** 3181 * ice_aq_set_event_mask 3182 * @hw: pointer to the HW struct 3183 * @port_num: port number of the physical function 3184 * @mask: event mask to be set 3185 * @cd: pointer to command details structure or NULL 3186 * 3187 * Set event mask (0x0613) 3188 */ 3189 enum ice_status 3190 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask, 3191 struct ice_sq_cd *cd) 3192 { 3193 struct ice_aqc_set_event_mask *cmd; 3194 struct ice_aq_desc desc; 3195 3196 cmd = &desc.params.set_event_mask; 3197 3198 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask); 3199 3200 cmd->lport_num = port_num; 3201 3202 cmd->event_mask = cpu_to_le16(mask); 3203 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3204 } 3205 3206 /** 3207 * ice_aq_set_mac_loopback 3208 * @hw: pointer to the HW struct 3209 * @ena_lpbk: Enable or Disable loopback 3210 * @cd: pointer to command details structure or NULL 3211 * 3212 * Enable/disable loopback on a given port 3213 */ 3214 enum ice_status 3215 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd) 3216 { 3217 struct ice_aqc_set_mac_lb *cmd; 3218 struct ice_aq_desc desc; 3219 3220 cmd = &desc.params.set_mac_lb; 3221 3222 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb); 3223 if (ena_lpbk) 3224 cmd->lb_mode = ICE_AQ_MAC_LB_EN; 3225 3226 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3227 } 3228 3229 /** 3230 * ice_aq_set_port_id_led 3231 * @pi: pointer to the port information 3232 * @is_orig_mode: is this LED set to original mode (by the net-list) 3233 * @cd: pointer to command details structure or NULL 3234 * 3235 * Set LED value for the given port (0x06e9) 3236 */ 3237 enum ice_status 3238 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode, 3239 struct ice_sq_cd *cd) 3240 { 3241 struct ice_aqc_set_port_id_led *cmd; 3242 struct ice_hw *hw = pi->hw; 3243 struct ice_aq_desc desc; 3244 3245 cmd = &desc.params.set_port_id_led; 3246 3247 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led); 3248 3249 if (is_orig_mode) 3250 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG; 3251 else 3252 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK; 3253 3254 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); 3255 } 3256 3257 /** 3258 * ice_aq_sff_eeprom 3259 * @hw: pointer to the HW struct 3260 * @lport: bits [7:0] = logical port, bit [8] = logical port valid 3261 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default) 3262 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding. 3263 * @page: QSFP page 3264 * @set_page: set or ignore the page 3265 * @data: pointer to data buffer to be read/written to the I2C device. 3266 * @length: 1-16 for read, 1 for write. 3267 * @write: 0 read, 1 for write. 
3268 * @cd: pointer to command details structure or NULL 3269 * 3270 * Read/Write SFF EEPROM (0x06EE) 3271 */ 3272 enum ice_status 3273 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, 3274 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, 3275 bool write, struct ice_sq_cd *cd) 3276 { 3277 struct ice_aqc_sff_eeprom *cmd; 3278 struct ice_aq_desc desc; 3279 enum ice_status status; 3280 3281 if (!data || (mem_addr & 0xff00)) 3282 return ICE_ERR_PARAM; 3283 3284 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom); 3285 cmd = &desc.params.read_write_sff_param; 3286 desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); 3287 cmd->lport_num = (u8)(lport & 0xff); 3288 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01); 3289 cmd->i2c_bus_addr = cpu_to_le16(((bus_addr >> 1) & 3290 ICE_AQC_SFF_I2CBUS_7BIT_M) | 3291 ((set_page << 3292 ICE_AQC_SFF_SET_EEPROM_PAGE_S) & 3293 ICE_AQC_SFF_SET_EEPROM_PAGE_M)); 3294 cmd->i2c_mem_addr = cpu_to_le16(mem_addr & 0xff); 3295 cmd->eeprom_page = cpu_to_le16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S); 3296 if (write) 3297 cmd->i2c_bus_addr |= cpu_to_le16(ICE_AQC_SFF_IS_WRITE); 3298 3299 status = ice_aq_send_cmd(hw, &desc, data, length, cd); 3300 return status; 3301 } 3302 3303 /** 3304 * __ice_aq_get_set_rss_lut 3305 * @hw: pointer to the hardware structure 3306 * @params: RSS LUT parameters 3307 * @set: set true to set the table, false to get the table 3308 * 3309 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table 3310 */ 3311 static enum ice_status 3312 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set) 3313 { 3314 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle; 3315 struct ice_aqc_get_set_rss_lut *cmd_resp; 3316 struct ice_aq_desc desc; 3317 enum ice_status status; 3318 u8 *lut; 3319 3320 if (!params) 3321 return ICE_ERR_PARAM; 3322 3323 vsi_handle = params->vsi_handle; 3324 lut = params->lut; 3325 3326 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut) 3327 return ICE_ERR_PARAM; 3328 3329 lut_size = params->lut_size; 3330 lut_type = params->lut_type; 3331 glob_lut_idx = params->global_lut_id; 3332 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); 3333 3334 cmd_resp = &desc.params.get_set_rss_lut; 3335 3336 if (set) { 3337 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut); 3338 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3339 } else { 3340 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut); 3341 } 3342 3343 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3344 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) & 3345 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) | 3346 ICE_AQC_GSET_RSS_LUT_VSI_VALID); 3347 3348 switch (lut_type) { 3349 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI: 3350 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF: 3351 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL: 3352 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) & 3353 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M); 3354 break; 3355 default: 3356 status = ICE_ERR_PARAM; 3357 goto ice_aq_get_set_rss_lut_exit; 3358 } 3359 3360 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) { 3361 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) & 3362 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M); 3363 3364 if (!set) 3365 goto ice_aq_get_set_rss_lut_send; 3366 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3367 if (!set) 3368 goto ice_aq_get_set_rss_lut_send; 3369 } else { 3370 goto ice_aq_get_set_rss_lut_send; 3371 } 3372 3373 /* LUT size is only valid for Global and PF table types */ 3374 switch (lut_size) { 3375 case 
ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128: 3376 break; 3377 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512: 3378 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG << 3379 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3380 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3381 break; 3382 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K: 3383 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) { 3384 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG << 3385 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) & 3386 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M; 3387 break; 3388 } 3389 fallthrough; 3390 default: 3391 status = ICE_ERR_PARAM; 3392 goto ice_aq_get_set_rss_lut_exit; 3393 } 3394 3395 ice_aq_get_set_rss_lut_send: 3396 cmd_resp->flags = cpu_to_le16(flags); 3397 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL); 3398 3399 ice_aq_get_set_rss_lut_exit: 3400 return status; 3401 } 3402 3403 /** 3404 * ice_aq_get_rss_lut 3405 * @hw: pointer to the hardware structure 3406 * @get_params: RSS LUT parameters used to specify which RSS LUT to get 3407 * 3408 * get the RSS lookup table, PF or VSI type 3409 */ 3410 enum ice_status 3411 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params) 3412 { 3413 return __ice_aq_get_set_rss_lut(hw, get_params, false); 3414 } 3415 3416 /** 3417 * ice_aq_set_rss_lut 3418 * @hw: pointer to the hardware structure 3419 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT 3420 * 3421 * set the RSS lookup table, PF or VSI type 3422 */ 3423 enum ice_status 3424 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params) 3425 { 3426 return __ice_aq_get_set_rss_lut(hw, set_params, true); 3427 } 3428 3429 /** 3430 * __ice_aq_get_set_rss_key 3431 * @hw: pointer to the HW struct 3432 * @vsi_id: VSI FW index 3433 * @key: pointer to key info struct 3434 * @set: set true to set the key, false to get the key 3435 * 3436 * get (0x0B04) or set (0x0B02) the RSS key per VSI 3437 */ 3438 static enum 3439 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id, 3440 struct ice_aqc_get_set_rss_keys *key, 3441 bool set) 3442 { 3443 struct ice_aqc_get_set_rss_key *cmd_resp; 3444 u16 key_size = sizeof(*key); 3445 struct ice_aq_desc desc; 3446 3447 cmd_resp = &desc.params.get_set_rss_key; 3448 3449 if (set) { 3450 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key); 3451 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3452 } else { 3453 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key); 3454 } 3455 3456 cmd_resp->vsi_id = cpu_to_le16(((vsi_id << 3457 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) & 3458 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) | 3459 ICE_AQC_GSET_RSS_KEY_VSI_VALID); 3460 3461 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL); 3462 } 3463 3464 /** 3465 * ice_aq_get_rss_key 3466 * @hw: pointer to the HW struct 3467 * @vsi_handle: software VSI handle 3468 * @key: pointer to key info struct 3469 * 3470 * get the RSS key per VSI 3471 */ 3472 enum ice_status 3473 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle, 3474 struct ice_aqc_get_set_rss_keys *key) 3475 { 3476 if (!ice_is_vsi_valid(hw, vsi_handle) || !key) 3477 return ICE_ERR_PARAM; 3478 3479 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3480 key, false); 3481 } 3482 3483 /** 3484 * ice_aq_set_rss_key 3485 * @hw: pointer to the HW struct 3486 * @vsi_handle: software VSI handle 3487 * @keys: pointer to key info struct 3488 * 3489 * set the RSS key per VSI 3490 */ 3491 enum ice_status 3492 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, 3493 struct 
ice_aqc_get_set_rss_keys *keys) 3494 { 3495 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys) 3496 return ICE_ERR_PARAM; 3497 3498 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle), 3499 keys, true); 3500 } 3501 3502 /** 3503 * ice_aq_add_lan_txq 3504 * @hw: pointer to the hardware structure 3505 * @num_qgrps: Number of added queue groups 3506 * @qg_list: list of queue groups to be added 3507 * @buf_size: size of buffer for indirect command 3508 * @cd: pointer to command details structure or NULL 3509 * 3510 * Add Tx LAN queue (0x0C30) 3511 * 3512 * NOTE: 3513 * Prior to calling add Tx LAN queue: 3514 * Initialize the following as part of the Tx queue context: 3515 * Completion queue ID if the queue uses Completion queue, Quanta profile, 3516 * Cache profile and Packet shaper profile. 3517 * 3518 * After add Tx LAN queue AQ command is completed: 3519 * Interrupts should be associated with specific queues, 3520 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue 3521 * flow. 3522 */ 3523 static enum ice_status 3524 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, 3525 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, 3526 struct ice_sq_cd *cd) 3527 { 3528 struct ice_aqc_add_tx_qgrp *list; 3529 struct ice_aqc_add_txqs *cmd; 3530 struct ice_aq_desc desc; 3531 u16 i, sum_size = 0; 3532 3533 cmd = &desc.params.add_txqs; 3534 3535 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs); 3536 3537 if (!qg_list) 3538 return ICE_ERR_PARAM; 3539 3540 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 3541 return ICE_ERR_PARAM; 3542 3543 for (i = 0, list = qg_list; i < num_qgrps; i++) { 3544 sum_size += struct_size(list, txqs, list->num_txqs); 3545 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + 3546 list->num_txqs); 3547 } 3548 3549 if (buf_size != sum_size) 3550 return ICE_ERR_PARAM; 3551 3552 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); 3553 3554 cmd->num_qgrps = num_qgrps; 3555 3556 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd); 3557 } 3558 3559 /** 3560 * ice_aq_dis_lan_txq 3561 * @hw: pointer to the hardware structure 3562 * @num_qgrps: number of groups in the list 3563 * @qg_list: the list of groups to disable 3564 * @buf_size: the total size of the qg_list buffer in bytes 3565 * @rst_src: if called due to reset, specifies the reset source 3566 * @vmvf_num: the relative VM or VF number that is undergoing the reset 3567 * @cd: pointer to command details structure or NULL 3568 * 3569 * Disable LAN Tx queue (0x0C31) 3570 */ 3571 static enum ice_status 3572 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, 3573 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size, 3574 enum ice_disq_rst_src rst_src, u16 vmvf_num, 3575 struct ice_sq_cd *cd) 3576 { 3577 struct ice_aqc_dis_txq_item *item; 3578 struct ice_aqc_dis_txqs *cmd; 3579 struct ice_aq_desc desc; 3580 enum ice_status status; 3581 u16 i, sz = 0; 3582 3583 cmd = &desc.params.dis_txqs; 3584 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs); 3585 3586 /* qg_list can be NULL only in VM/VF reset flow */ 3587 if (!qg_list && !rst_src) 3588 return ICE_ERR_PARAM; 3589 3590 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) 3591 return ICE_ERR_PARAM; 3592 3593 cmd->num_entries = num_qgrps; 3594 3595 cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) & 3596 ICE_AQC_Q_DIS_TIMEOUT_M); 3597 3598 switch (rst_src) { 3599 case ICE_VM_RESET: 3600 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET; 3601 cmd->vmvf_and_timeout |= 3602 cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M); 3603 break; 3604 case 
/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txq_item *item;
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = cpu_to_le16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			cpu_to_le16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on timeout */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	for (i = 0, item = qg_list; i < num_qgrps; i++) {
		u16 item_size = struct_size(item, q_id, item->num_qs);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((item->num_qs % 2) == 0)
			item_size += 2;

		sz += item_size;

		item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  le16_to_cpu(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}
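/*
 * Illustrative sketch, not part of the original driver: the buffer handed to
 * ice_aq_dis_lan_txq() must account for the 2 bytes of padding firmware
 * expects after any entry with an even number of queue IDs, mirroring the
 * validation loop above. ice_example_dis_txq_item_size() is a hypothetical
 * helper restating that sizing rule.
 */
static u16 ice_example_dis_txq_item_size(struct ice_aqc_dis_txq_item *item)
{
	u16 item_size = struct_size(item, q_id, item->num_qs);

	/* entries with an even number of queues carry 2 bytes of padding */
	if ((item->num_qs % 2) == 0)
		item_size += 2;

	return item_size;
}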
/**
 * ice_aq_add_rdma_qsets
 * @hw: pointer to the hardware structure
 * @num_qset_grps: Number of RDMA Qset groups
 * @qset_list: list of Qset groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx RDMA Qsets (0x0C33)
 */
static int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
		      struct ice_aqc_add_rdma_qset_data *qset_list,
		      u16 buf_size, struct ice_sq_cd *cd)
{
	struct ice_aqc_add_rdma_qset_data *list;
	struct ice_aqc_add_rdma_qset *cmd;
	struct ice_aq_desc desc;
	u16 i, sum_size = 0;

	cmd = &desc.params.add_rdma_qset;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);

	if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
		return -EINVAL;

	for (i = 0, list = qset_list; i < num_qset_grps; i++) {
		u16 num_qsets = le16_to_cpu(list->num_qsets);

		sum_size += struct_size(list, rdma_qsets, num_qsets);
		list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
							     num_qsets);
	}

	if (buf_size != sum_size)
		return -EINVAL;

	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);

	cmd->num_qset_grps = num_qset_grps;

	return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
						   buf_size, cd));
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_byte, dest, sizeof(dest_byte));

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_byte, sizeof(dest_byte));
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_word, dest, sizeof(dest_word));

	dest_word &= ~(cpu_to_le16(mask));	/* get the bits not changing */
	dest_word |= cpu_to_le16(src_word);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_word, sizeof(dest_word));
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_dword, dest, sizeof(dest_dword));

	dest_dword &= ~(cpu_to_le32(mask));	/* get the bits not changing */
	dest_dword |= cpu_to_le32(src_dword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_dword, sizeof(dest_dword));
}
/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	memcpy(&dest_qword, dest, sizeof(dest_qword));

	dest_qword &= ~(cpu_to_le64(mask));	/* get the bits not changing */
	dest_qword |= cpu_to_le64(src_qword);	/* add in the new bits */

	/* put it all back */
	memcpy(dest, &dest_qword, sizeof(dest_qword));
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @hw: pointer to the hardware structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
	    const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
			ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
				  f, ce_info[f].width, ce_info[f].size_of);
			continue;
		}
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return 0;
}
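/*
 * Illustrative sketch, not part of the original driver: a hypothetical
 * two-field context layout packed with ice_set_ctx(). The example struct and
 * table are invented for illustration; ICE_CTX_STORE() is assumed to be the
 * initializer macro this driver uses for its real tables (such as
 * ice_tlan_ctx_info), and the terminating { 0 } entry stops the width-based
 * loop in ice_set_ctx().
 */
struct ice_example_ctx {
	u16 queue_id;	/* packed into bits 0..10 */
	u8 enable;	/* packed into bit 11 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
				    /* Field		Width	LSB */
	ICE_CTX_STORE(ice_example_ctx, queue_id,	11,	0),
	ICE_CTX_STORE(ice_example_ctx, enable,		1,	11),
	{ 0 }
};

static enum ice_status
ice_example_pack_ctx(struct ice_hw *hw, struct ice_example_ctx *ctx,
		     u8 *packed_buf)
{
	/* walks the table and writes each field at its bit offset */
	return ice_set_ctx(hw, (u8 *)ctx, packed_buf, ice_example_ctx_info);
}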
/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}
/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bit 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 *   Bit 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections =
		ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
		ICE_AQC_ELEM_VALID_EIR;
	buf->txqs[0].info.generic = 0;
	buf->txqs[0].info.cir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.cir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
	buf->txqs[0].info.eir_bw.bw_profile_idx =
		cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
	buf->txqs[0].info.eir_bw.bw_alloc =
		cpu_to_le16(ICE_SCHED_DFLT_BW_WT);

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  le16_to_cpu(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = le32_to_cpu(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	mutex_unlock(&pi->sched_lock);
	return status;
}
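/*
 * Illustrative sketch, not part of the original driver: the shape of a call
 * into ice_ena_vsi_txq() for a single queue, after the caller has packed the
 * Tx queue context into the first entry of qg_buf. The function name and
 * arguments here are hypothetical; note the function above enforces one
 * queue group with one queue.
 */
static enum ice_status
ice_example_ena_single_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
			   u16 q_handle, struct ice_aqc_add_tx_qgrp *qg_buf,
			   u16 buf_size)
{
	/* exactly one queue group containing one queue */
	return ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
			       buf_size, NULL);
}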
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item *qg_list;
	struct ice_q_ctx *q_ctx;
	struct ice_hw *hw;
	u16 i, buf_size;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	hw = pi->hw;

	if (!num_queues) {
		/* if the queue is already disabled but the disable queue
		 * command still has to be sent to complete the VF reset,
		 * call ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	buf_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(buf_size, GFP_KERNEL);
	if (!qg_list)
		return ICE_ERR_NO_MEMORY;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
		status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
					    vmvf_num, cd);

		if (status)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return status;
}

/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = 0;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	mutex_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	mutex_unlock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}
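/*
 * Illustrative sketch, not part of the original driver: building the per-TC
 * max-queue array for ice_cfg_vsi_lan(). The queue counts and TC bitmap are
 * invented; real callers derive them from the VSI's queue allocation.
 */
static enum ice_status
ice_example_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle)
{
	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS];
	u8 i;

	/* allow up to 16 LAN queues on every TC; enable only TC 0 */
	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++)
		max_lanqs[i] = 16;

	return ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
}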
4207 */ 4208 int 4209 ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap, 4210 u16 *max_rdmaqs) 4211 { 4212 return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, 4213 max_rdmaqs, 4214 ICE_SCHED_NODE_OWNER_RDMA)); 4215 } 4216 4217 /** 4218 * ice_ena_vsi_rdma_qset 4219 * @pi: port information structure 4220 * @vsi_handle: software VSI handle 4221 * @tc: TC number 4222 * @rdma_qset: pointer to RDMA Qset 4223 * @num_qsets: number of RDMA Qsets 4224 * @qset_teid: pointer to Qset node TEIDs 4225 * 4226 * This function adds RDMA Qset 4227 */ 4228 int 4229 ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc, 4230 u16 *rdma_qset, u16 num_qsets, u32 *qset_teid) 4231 { 4232 struct ice_aqc_txsched_elem_data node = { 0 }; 4233 struct ice_aqc_add_rdma_qset_data *buf; 4234 struct ice_sched_node *parent; 4235 enum ice_status status; 4236 struct ice_hw *hw; 4237 u16 i, buf_size; 4238 int ret; 4239 4240 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4241 return -EIO; 4242 hw = pi->hw; 4243 4244 if (!ice_is_vsi_valid(hw, vsi_handle)) 4245 return -EINVAL; 4246 4247 buf_size = struct_size(buf, rdma_qsets, num_qsets); 4248 buf = kzalloc(buf_size, GFP_KERNEL); 4249 if (!buf) 4250 return -ENOMEM; 4251 mutex_lock(&pi->sched_lock); 4252 4253 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc, 4254 ICE_SCHED_NODE_OWNER_RDMA); 4255 if (!parent) { 4256 ret = -EINVAL; 4257 goto rdma_error_exit; 4258 } 4259 buf->parent_teid = parent->info.node_teid; 4260 node.parent_teid = parent->info.node_teid; 4261 4262 buf->num_qsets = cpu_to_le16(num_qsets); 4263 for (i = 0; i < num_qsets; i++) { 4264 buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]); 4265 buf->rdma_qsets[i].info.valid_sections = 4266 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | 4267 ICE_AQC_ELEM_VALID_EIR; 4268 buf->rdma_qsets[i].info.generic = 0; 4269 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx = 4270 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4271 buf->rdma_qsets[i].info.cir_bw.bw_alloc = 4272 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4273 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx = 4274 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); 4275 buf->rdma_qsets[i].info.eir_bw.bw_alloc = 4276 cpu_to_le16(ICE_SCHED_DFLT_BW_WT); 4277 } 4278 ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL); 4279 if (ret) { 4280 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n"); 4281 goto rdma_error_exit; 4282 } 4283 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF; 4284 for (i = 0; i < num_qsets; i++) { 4285 node.node_teid = buf->rdma_qsets[i].qset_teid; 4286 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, 4287 &node); 4288 if (status) { 4289 ret = ice_status_to_errno(status); 4290 break; 4291 } 4292 qset_teid[i] = le32_to_cpu(node.node_teid); 4293 } 4294 rdma_error_exit: 4295 mutex_unlock(&pi->sched_lock); 4296 kfree(buf); 4297 return ret; 4298 } 4299 4300 /** 4301 * ice_dis_vsi_rdma_qset - free RDMA resources 4302 * @pi: port_info struct 4303 * @count: number of RDMA Qsets to free 4304 * @qset_teid: TEID of Qset node 4305 * @q_id: list of queue IDs being disabled 4306 */ 4307 int 4308 ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid, 4309 u16 *q_id) 4310 { 4311 struct ice_aqc_dis_txq_item *qg_list; 4312 enum ice_status status = 0; 4313 struct ice_hw *hw; 4314 u16 qg_size; 4315 int i; 4316 4317 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) 4318 return -EIO; 4319 4320 hw = pi->hw; 4321 4322 qg_size = struct_size(qg_list, q_id, 1); 4323 qg_list = 
/**
 * ice_dis_vsi_rdma_qset - free RDMA resources
 * @pi: port_info struct
 * @count: number of RDMA Qsets to free
 * @qset_teid: TEID of Qset node
 * @q_id: list of queue IDs being disabled
 */
int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
		      u16 *q_id)
{
	struct ice_aqc_dis_txq_item *qg_list;
	enum ice_status status = 0;
	struct ice_hw *hw;
	u16 qg_size;
	int i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return -EIO;

	hw = pi->hw;

	qg_size = struct_size(qg_list, q_id, 1);
	qg_list = kzalloc(qg_size, GFP_KERNEL);
	if (!qg_list)
		return -ENOMEM;

	mutex_lock(&pi->sched_lock);

	for (i = 0; i < count; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
		if (!node)
			continue;

		qg_list->parent_teid = node->info.parent_teid;
		qg_list->num_qs = 1;
		qg_list->q_id[0] =
			cpu_to_le16(q_id[i] |
				    ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);

		status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
					    ICE_NO_RESET, 0, NULL);
		if (status)
			break;

		ice_free_sched_node(pi, node);
	}

	mutex_unlock(&pi->sched_lock);
	kfree(qg_list);
	return ice_status_to_errno(status);
}

/**
 * ice_replay_pre_init - replay pre-initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_SW_LKUP_LAST; i++)
		list_replace_init(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return 0;
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}
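/*
 * Illustrative sketch, not part of the original driver: the replay ordering
 * the comments above require after a reset. The main VSI must be replayed
 * first (that call triggers ice_replay_pre_init()), the remaining handles
 * follow, and ice_replay_post() performs cleanup. ice_example_replay_vsis()
 * is a hypothetical helper; it assumes vsi_handles[0] is ICE_MAIN_VSI_HANDLE.
 */
static enum ice_status
ice_example_replay_vsis(struct ice_hw *hw, u16 *vsi_handles, u16 num_vsis)
{
	enum ice_status status = 0;
	u16 i;

	for (i = 0; i < num_vsis; i++) {
		status = ice_replay_vsi(hw, vsi_handles[i]);
		if (status)
			break;
	}

	ice_replay_post(hw);
	return status;
}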
/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}
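/*
 * Illustrative sketch, not part of the original driver: the typical pattern
 * for the stat helpers above, keeping a previous raw snapshot so that deltas
 * survive the 40-bit register roll-over. The struct, field names, and the
 * register offset argument are hypothetical placeholders.
 */
struct ice_example_stats {
	bool loaded;		/* false until the first read after reset */
	u64 prev_rx_bytes;	/* last raw register snapshot */
	u64 rx_bytes;		/* accumulated software counter */
};

static void
ice_example_update_stats(struct ice_hw *hw, u32 rx_bytes_reg,
			 struct ice_example_stats *stats)
{
	ice_stat_update40(hw, rx_bytes_reg, stats->loaded,
			  &stats->prev_rx_bytes, &stats->rx_bytes);
	stats->loaded = true;
}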
/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	memset(buf, 0, buf_size);
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type low.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT, "Failed to read override PHY type high.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));
	}

	return status;
}

/**
 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
 * @caps: get PHY capability data
 */
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
{
	if (caps->caps & ICE_AQC_PHY_AN_MODE ||
	    caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
				       ICE_AQC_PHY_AN_EN_CLAUSE73 |
				       ICE_AQC_PHY_AN_EN_CLAUSE37))
		return true;

	return false;
}
/**
 * ice_aq_set_lldp_mib - Set the LLDP MIB
 * @hw: pointer to the HW struct
 * @mib_type: Local, Remote or both Local and Remote MIBs
 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
 * @buf_size: size of the buffer (in bytes)
 * @cd: pointer to command details structure or NULL
 *
 * Set the LLDP MIB. (0x0A08)
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
 * @hw: pointer to HW struct
 */
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
{
	if (hw->mac_type != ICE_MAC_E810)
		return false;

	if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
		    hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
		return true;
	}
	return false;
}

/**
 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
 * @hw: pointer to HW struct
 * @vsi_num: absolute HW index for VSI
 * @add: boolean for if adding or removing a filter
 */
enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
	struct ice_aqc_lldp_filter_ctrl *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_filter_ctrl;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);

	if (add)
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
	else
		cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;

	cmd->vsi_num = cpu_to_le16(vsi_num);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_fw_supports_report_dflt_cfg
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
		    hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
		return true;
	}
	return false;
}
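/*
 * Illustrative sketch, not part of the original driver: the three
 * ice_fw_supports_*() helpers above share one major/minor/patch comparison
 * pattern, restated here as a hypothetical generic helper. The driver keeps
 * the checks open-coded; this only re-expresses that logic.
 */
static bool
ice_example_fw_api_ver_ge(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
{
	if (hw->api_maj_ver > maj)
		return true;
	if (hw->api_maj_ver == maj) {
		if (hw->api_min_ver > min)
			return true;
		if (hw->api_min_ver == min && hw->api_patch >= patch)
			return true;
	}

	return false;
}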