Lines matching "hw-timeout-ms"
1 /* SPDX-License-Identifier: BSD-3-Clause */
127 * ice_dump_phy_type - helper function to dump phy_type
128 * @hw: pointer to the HW structure
134 ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
138 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix,
143 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
147 ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix,
152 ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
158 * ice_set_mac_type - Sets MAC type
159 * @hw: pointer to the HW structure
162 * vendor ID and device ID stored in the HW structure.
164 int ice_set_mac_type(struct ice_hw *hw)
166 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
168 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
171 switch (hw->device_id) {
178 hw->mac_type = ICE_MAC_E810;
199 hw->mac_type = ICE_MAC_GENERIC;
205 hw->mac_type = ICE_MAC_GENERIC_3K_E825;
216 hw->mac_type = ICE_MAC_E830;
219 hw->mac_type = ICE_MAC_UNKNOWN;
223 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
229 * @hw: pointer to the hardware structure
233 bool ice_is_generic_mac(struct ice_hw *hw)
235 return (hw->mac_type == ICE_MAC_GENERIC ||
236 hw->mac_type == ICE_MAC_GENERIC_3K ||
237 hw->mac_type == ICE_MAC_GENERIC_3K_E825);
242 * @hw: pointer to the hardware structure
246 bool ice_is_e810(struct ice_hw *hw)
248 return hw->mac_type == ICE_MAC_E810;
253 * @hw: pointer to the hardware structure
257 bool ice_is_e810t(struct ice_hw *hw)
259 switch (hw->device_id) {
261 switch (hw->subsystem_device_id) {
272 switch (hw->subsystem_device_id) {
288 * @hw: pointer to the hardware structure
292 bool ice_is_e830(struct ice_hw *hw)
294 return hw->mac_type == ICE_MAC_E830;
299 * @hw: pointer to the hardware structure
301 * returns true if the device is E823-L or E823-C based, false if not.
303 bool ice_is_e823(struct ice_hw *hw)
305 switch (hw->device_id) {
324 * @hw: pointer to the hardware structure
326 * returns true if the device is E825-C based, false if not.
328 bool ice_is_e825c(struct ice_hw *hw)
330 switch (hw->device_id) {
342 * ice_clear_pf_cfg - Clear PF configuration
343 * @hw: pointer to the hardware structure
348 int ice_clear_pf_cfg(struct ice_hw *hw)
354 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
358 * ice_aq_manage_mac_read - manage MAC address read command
359 * @hw: pointer to the HW struct
368 * Response such as various MAC addresses are stored in HW struct (port.mac)
373 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
390 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
395 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
398 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
403 for (i = 0; i < cmd->num_addr; i++)
405 ice_memcpy(hw->port_info->mac.lan_addr,
408 ice_memcpy(hw->port_info->mac.perm_addr,
444 * ice_set_media_type - Sets media type
455 phy_type_high = pi->phy.phy_type_high;
456 phy_type_low = pi->phy.phy_type_low;
457 media_type = &pi->phy.media_type;
460 if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
462 /* else if PHY types are only BASE-T, then media type is BASET */
504 * ice_aq_get_phy_caps - returns PHY capabilities
522 struct ice_hw *hw;
529 hw = pi->hw;
532 !ice_fw_supports_report_dflt_cfg(hw))
538 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
540 cmd->param0 |= CPU_TO_LE16(report_mode);
542 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
544 ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
563 ice_dump_phy_type(hw, LE64_TO_CPU(pcaps->phy_type_low),
564 LE64_TO_CPU(pcaps->phy_type_high), prefix);
566 ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
568 ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
569 ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
570 pcaps->low_power_ctrl_an);
571 ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
572 pcaps->eee_cap);
573 ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
574 pcaps->eeer_value);
575 ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
576 pcaps->link_fec_options);
577 ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
578 prefix, pcaps->module_compliance_enforcement);
579 ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
580 prefix, pcaps->extended_compliance_code);
581 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
582 pcaps->module_type[0]);
583 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
584 pcaps->module_type[1]);
585 ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
586 pcaps->module_type[2]);
589 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
590 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
591 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
592 sizeof(pi->phy.link_info.module_type),
595 ice_debug(hw, ICE_DBG_LINK, "%s: media_type = 0x%x\n", prefix,
596 pi->phy.media_type);
603 * ice_aq_get_phy_equalization - function to read serdes equalizer value from
605 * @hw: pointer to the HW struct
609 * @output: pointer to the caller-supplied buffer to return serdes equalizer
612 * non-zero status on error
614 int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
622 if (!hw || !output)
635 cmd->activity_id = CPU_TO_LE16(ICE_AQC_ACT_ID_DNL);
636 cmd->ctx = 0;
638 err = ice_aq_send_cmd(hw, &desc, &buf,
646 #define ice_get_link_status_data_ver(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
651 * @hw: pointer to the HW struct
655 static u16 ice_get_link_status_datalen(struct ice_hw *hw)
657 return (ice_get_link_status_data_ver(hw) ==
666 * @link: pointer to link status structure - optional
681 struct ice_hw *hw;
687 hw = pi->hw;
689 li_old = &pi->phy.link_info_old;
690 li = &pi->phy.link_info;
691 hw_fc_info = &pi->fc;
696 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
697 resp->lport_num = pi->lport;
699 status = ice_aq_send_cmd(hw, &desc, &link_data,
700 ice_get_link_status_datalen(hw), cd);
708 li->link_speed = LE16_TO_CPU(link_data.link_speed);
709 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
710 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
711 li->link_info = link_data.link_info;
712 li->link_cfg_err = link_data.link_cfg_err;
713 li->an_info = link_data.an_info;
714 li->ext_info = link_data.ext_info;
715 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
716 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
717 li->topo_media_conflict = link_data.topo_media_conflict;
718 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
725 hw_fc_info->current_mode = ICE_FC_FULL;
727 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
729 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
731 hw_fc_info->current_mode = ICE_FC_NONE;
733 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
735 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
736 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
737 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
738 (unsigned long long)li->phy_type_low);
739 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
740 (unsigned long long)li->phy_type_high);
741 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
742 ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
743 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
744 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
745 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
746 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
747 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
748 li->max_frame_size);
749 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
756 pi->phy.get_link_info = false;
763 * @hw: pointer to the HW struct
770 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
785 if ((hw)->mac_type == ICE_MAC_E830) {
787 val = rd32(hw, E830_PRTMAC_CL01_PAUSE_QUANTA);
789 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
792 val = rd32(hw, E830_PRTMAC_CL01_QUANTA_THRESH);
796 val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(E800_IDX_OF_LFC));
799 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
802 val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(E800_IDX_OF_LFC));
806 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
811 * @hw: pointer to the HW struct
813 * @auto_drop: Tell HW to drop packets if TC queue is blocked
819 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
832 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
834 if (ice_is_fw_auto_drop_supported(hw) && auto_drop)
835 cmd->drop_opts |= ICE_AQ_SET_MAC_AUTO_DROP_BLOCKING_PKTS;
836 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
838 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
842 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
843 * @hw: pointer to the HW struct
845 int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
850 hw->switch_info = (struct ice_switch_info *)
851 ice_malloc(hw, sizeof(*hw->switch_info));
853 sw = hw->switch_info;
858 INIT_LIST_HEAD(&sw->vsi_list_map_head);
859 sw->prof_res_bm_init = 0;
861 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
863 ice_free(hw, hw->switch_info);
870 * ice_cleanup_fltr_mgmt_single - clears single filter mngt struct
871 * @hw: pointer to the HW struct
875 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
885 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
887 LIST_DEL(&v_pos_map->list_entry);
888 ice_free(hw, v_pos_map);
890 recps = sw->recp_list;
898 LIST_DEL(&rg_entry->l_entry);
899 ice_free(hw, rg_entry);
911 LIST_DEL(&lst_itr->list_entry);
912 ice_free(hw, lst_itr->lkups);
913 ice_free(hw, lst_itr);
923 LIST_DEL(&lst_itr->list_entry);
924 ice_free(hw, lst_itr);
928 ice_free(hw, recps[i].root_buf);
930 ice_rm_sw_replay_rule_info(hw, sw);
931 ice_free(hw, sw->recp_list);
932 ice_free(hw, sw);
936 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
937 * @hw: pointer to the HW struct
939 void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
941 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
946 * @hw: pointer to the HW struct
949 * bandwidth according to the device's configuration during power-on.
951 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
953 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
961 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
962 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
965 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
966 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
972 * ice_print_rollback_msg - print FW rollback message
973 * @hw: pointer to the hardware structure
975 void ice_print_rollback_msg(struct ice_hw *hw)
981 orom = &hw->flash.orom;
982 nvm = &hw->flash.nvm;
985 nvm->major, nvm->minor, nvm->eetrack, orom->major,
986 orom->build, orom->patch);
987 ice_warn(hw,
989 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
994 * @hw: pointer to the hw struct
998 void ice_set_umac_shared(struct ice_hw *hw)
1000 hw->umac_shared = true;
1004 * ice_init_hw - main hardware initialization routine
1005 * @hw: pointer to the hardware structure
1007 int ice_init_hw(struct ice_hw *hw)
1014 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1017 status = ice_set_mac_type(hw);
1021 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
1025 status = ice_reset(hw, ICE_RESET_PFR);
1028 ice_get_itr_intrl_gran(hw);
1030 hw->fw_vsi_num = ICE_DFLT_VSI_INVAL;
1032 status = ice_create_all_ctrlq(hw);
1036 ice_fwlog_set_support_ena(hw);
1037 status = ice_fwlog_set(hw, &hw->fwlog_cfg);
1039 ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging, status %d.\n",
1042 if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_REGISTER_ON_INIT) {
1043 status = ice_fwlog_register(hw);
1045 ice_debug(hw, ICE_DBG_INIT, "Failed to register for FW logging events, status %d.\n",
1048 status = ice_fwlog_unregister(hw);
1050 ice_debug(hw, ICE_DBG_INIT, "Failed to unregister for FW logging events, status %d.\n",
1055 status = ice_init_nvm(hw);
1059 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
1060 ice_print_rollback_msg(hw);
1062 if (!hw->skip_clear_pf) {
1063 status = ice_clear_pf_cfg(hw);
1068 ice_clear_pxe_mode(hw);
1070 status = ice_get_caps(hw);
1074 if (!hw->port_info)
1075 hw->port_info = (struct ice_port_info *)
1076 ice_malloc(hw, sizeof(*hw->port_info));
1077 if (!hw->port_info) {
1082 hw->port_info->loopback_mode = ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL;
1084 /* set the back pointer to HW */
1085 hw->port_info->hw = hw;
1088 status = ice_get_initial_sw_cfg(hw);
1092 hw->evb_veb = true;
1094 status = ice_sched_query_res_alloc(hw);
1096 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
1099 ice_sched_get_psm_clk_freq(hw);
1102 status = ice_sched_init_port(hw->port_info);
1106 ice_malloc(hw, sizeof(*pcaps));
1113 status = ice_aq_get_phy_caps(hw->port_info, false,
1115 ice_free(hw, pcaps);
1117 ice_warn(hw, "Get PHY capabilities failed status = %d, continuing anyway\n",
1121 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
1125 if (!hw->sw_entry_point_layer) {
1126 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
1130 INIT_LIST_HEAD(&hw->agg_list);
1132 if (!hw->max_burst_size)
1133 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
1134 status = ice_init_fltr_mgmt_struct(hw);
1141 mac_buf = ice_calloc(hw, 2,
1150 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
1151 ice_free(hw, mac_buf);
1157 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, false,
1162 status = ice_init_hw_tbls(hw);
1165 ice_init_lock(&hw->tnl_lock);
1170 ice_cleanup_fltr_mgmt_struct(hw);
1172 ice_sched_cleanup_all(hw);
1174 ice_free(hw, hw->port_info);
1175 hw->port_info = NULL;
1177 ice_destroy_all_ctrlq(hw);
1182 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
1183 * @hw: pointer to the hardware structure
1189 void ice_deinit_hw(struct ice_hw *hw)
1191 ice_cleanup_fltr_mgmt_struct(hw);
1193 ice_sched_cleanup_all(hw);
1194 ice_sched_clear_agg(hw);
1195 ice_free_seg(hw);
1196 ice_free_hw_tbls(hw);
1197 ice_destroy_lock(&hw->tnl_lock);
1199 if (hw->port_info) {
1200 ice_free(hw, hw->port_info);
1201 hw->port_info = NULL;
1204 ice_destroy_all_ctrlq(hw);
1207 ice_clear_all_vsi_ctx(hw);
1211 * ice_check_reset - Check to see if a global reset is complete
1212 * @hw: pointer to the hardware structure
1214 int ice_check_reset(struct ice_hw *hw)
1219 * or EMPR has occurred. The grst delay value is in 100ms units.
1222 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
1227 reg = rd32(hw, GLGEN_RSTAT);
1233 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
1245 uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.iwarp ?
1252 reg = rd32(hw, GLNVM_ULD) & uld_mask;
1254 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
1261 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
1270 * ice_pf_reset - Reset the PF
1271 * @hw: pointer to the hardware structure
1276 static int ice_pf_reset(struct ice_hw *hw)
1285 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
1286 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
1288 if (ice_check_reset(hw))
1295 reg = rd32(hw, PFGEN_CTRL);
1297 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
1300 * timeout plus the PFR timeout which will account for a possible reset
1307 reg = rd32(hw, PFGEN_CTRL);
1315 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1323 * ice_reset - Perform different types of reset
1324 * @hw: pointer to the hardware structure
1334 int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1340 return ice_pf_reset(hw);
1342 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1346 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1353 val |= rd32(hw, GLGEN_RTRIG);
1354 wr32(hw, GLGEN_RTRIG, val);
1355 ice_flush(hw);
1358 return ice_check_reset(hw);
1363 * @hw: pointer to the hardware structure
1367 * Copies rxq context from dense structure to HW register space
1370 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1380 /* Copy each dword separately to HW */
1382 wr32(hw, QRX_CONTEXT(i, rxq_index),
1385 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1393 * ice_copy_rxq_ctx_from_hw - Copy rxq context register from HW
1394 * @hw: pointer to the hardware structure
1398 * Copies rxq context from HW register space to dense structure
1401 ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1411 /* Copy each dword separately from HW */
1415 *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
1417 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
1451 * @hw: pointer to the hardware structure
1456 * it to HW register space and enables the hardware to prefetch descriptors
1460 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1468 rlan_ctx->prefena = 1;
1470 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1471 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
1475 * ice_read_rxq_ctx - Read rxq context from HW
1476 * @hw: pointer to the hardware structure
1480 * Read rxq context from HW register space and then converts it from dense
1484 ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1493 status = ice_copy_rxq_ctx_from_hw(hw, ctx_buf, rxq_index);
1502 * @hw: pointer to the hardware structure
1505 * Clears rxq context in HW register space
1507 int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1516 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1522 * Bit[0-175] is valid
1559 * @hw: pointer to the hardware structure
1563 * Copies Tx completion queue context from dense structure to HW register space
1566 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1577 /* Copy each dword separately to HW */
1579 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1582 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1607 * @hw: pointer to the hardware structure
1612 * writes it to HW register space
1615 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1621 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1622 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1627 * @hw: pointer to the hardware structure
1630 * Clears Tx completion queue context in HW register space
1633 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1642 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1649 * @hw: pointer to the hardware structure
1653 * Copies doorbell queue context from dense structure to HW register space
1656 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1667 /* Copy each dword separately to HW */
1669 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1672 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1698 * @hw: pointer to the hardware structure
1703 * writes it to HW register space
1706 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1712 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1714 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1719 * @hw: pointer to the hardware structure
1722 * Clears doorbell queue context in HW register space
1725 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1734 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1742 * ice_get_sbq - returns the right control queue to use for sideband
1743 * @hw: pointer to the hardware structure
1745 static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
1747 if (!ice_is_generic_mac(hw))
1748 return &hw->adminq;
1749 return &hw->sbq;
1753 * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
1754 * @hw: pointer to the HW struct
1761 ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1764 return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
1769 * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
1771 * @hw: pointer to the HW struct
1778 ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
1781 return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
1787 * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
1788 * @hw: pointer to the HW struct
1794 int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
1804 msg.dest_dev = in->dest_dev;
1805 msg.opcode = in->opcode;
1808 msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
1809 msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
1811 if (in->opcode)
1812 msg.data = CPU_TO_LE32(in->data);
1817 msg_len -= sizeof(msg.data);
1823 status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
1825 status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
1827 if (!status && !in->opcode)
1828 in->data = LE32_TO_CPU
1829 (((struct ice_sbq_msg_cmpl *)&msg)->data);
1834 * ice_sbq_rw_reg - Fill Sideband Queue command
1835 * @hw: pointer to the HW struct
1839 int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag)
1841 return ice_sbq_rw_reg_lp(hw, in, flag, true);
1845 * ice_sbq_lock - Lock the sideband queue's sq_lock
1846 * @hw: pointer to the HW struct
1848 void ice_sbq_lock(struct ice_hw *hw)
1850 ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
1854 * ice_sbq_unlock - Unlock the sideband queue's sq_lock
1855 * @hw: pointer to the HW struct
1857 void ice_sbq_unlock(struct ice_hw *hw)
1859 ice_release_lock(&ice_get_sbq(hw)->sq_lock);
1893 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
1894 * @hw: pointer to the HW struct
1905 ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1916 opcode = LE16_TO_CPU(desc->opcode);
1922 buf_cpy = (u8 *)ice_malloc(hw, buf_size);
1932 status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
1935 hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
1950 ice_free(hw, buf_cpy);
1956 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1957 * @hw: pointer to the HW struct
1966 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1969 return ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
1974 * @hw: pointer to the HW struct
1979 int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1989 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1992 hw->fw_branch = resp->fw_branch;
1993 hw->fw_maj_ver = resp->fw_major;
1994 hw->fw_min_ver = resp->fw_minor;
1995 hw->fw_patch = resp->fw_patch;
1996 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1997 hw->api_branch = resp->api_branch;
1998 hw->api_maj_ver = resp->api_major;
1999 hw->api_min_ver = resp->api_minor;
2000 hw->api_patch = resp->api_patch;
2008 * @hw: pointer to the HW struct
2015 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
2030 cmd->major_ver = dv->major_ver;
2031 cmd->minor_ver = dv->minor_ver;
2032 cmd->build_ver = dv->build_ver;
2033 cmd->subbuild_ver = dv->subbuild_ver;
2036 while (len < sizeof(dv->driver_string) &&
2037 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
2040 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
2045 * @hw: pointer to the HW struct
2051 int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
2061 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
2063 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2068 * @hw: pointer to the HW struct
2072 * @timeout: the maximum time in ms that the driver may hold the resource
2078 * 1) 0 - acquired lock, and can perform download package
2079 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
2080 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
2086 * phase of operation, it is possible that the FW may detect a timeout and issue
2093 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2094 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
2101 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2107 cmd_resp->res_id = CPU_TO_LE16(res);
2108 cmd_resp->access_type = CPU_TO_LE16(access);
2109 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
2110 cmd_resp->timeout = CPU_TO_LE32(*timeout);
2111 *timeout = 0;
2113 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2115 /* The completion specifies the maximum time in ms that the driver
2116 * may hold the resource in the Timeout field.
2123 * and the timeout field indicates the maximum time the current owner
2127 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
2128 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2130 } else if (LE16_TO_CPU(cmd_resp->status) ==
2132 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2134 } else if (LE16_TO_CPU(cmd_resp->status) ==
2139 /* invalid FW response, force a timeout immediately */
2140 *timeout = 0;
2145 * with a busy return value and the timeout field indicates the maximum
2148 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
2149 *timeout = LE32_TO_CPU(cmd_resp->timeout);
2156 * @hw: pointer to the HW struct
2164 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
2170 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2176 cmd->res_id = CPU_TO_LE16(res);
2177 cmd->res_number = CPU_TO_LE32(sdp_number);
2179 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2184 * @hw: pointer to the HW structure
2187 * @timeout: timeout in milliseconds
2192 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
2193 enum ice_aq_res_access_type access, u32 timeout)
2197 u32 time_left = timeout;
2200 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2202 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2213 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
2216 timeout = time_left;
2217 while (status && timeout && time_left) {
2219 timeout = (timeout > delay) ? timeout - delay : 0;
2220 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
2231 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
2236 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
2238 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
2245 * @hw: pointer to the HW structure
2250 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
2255 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2257 status = ice_aq_release_res(hw, res, 0, NULL);
2260 * results in an admin queue timeout, so handle them correctly
2263 (total_delay < hw->adminq.sq_cmd_timeout)) {
2265 status = ice_aq_release_res(hw, res, 0, NULL);
2271 * ice_aq_alloc_free_res - command to allocate/free resources
2272 * @hw: pointer to the HW struct
2282 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
2289 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
2303 cmd->num_entries = CPU_TO_LE16(num_entries);
2305 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2309 * ice_alloc_hw_res - allocate resource
2310 * @hw: pointer to the HW struct
2317 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
2324 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2329 buf->num_elems = CPU_TO_LE16(num);
2330 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
2333 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
2335 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
2340 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
2344 ice_free(hw, buf);
2349 * ice_free_hw_res - free allocated HW resource
2350 * @hw: pointer to the HW struct
2355 int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
2362 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
2367 buf->num_elems = CPU_TO_LE16(num);
2368 buf->res_type = CPU_TO_LE16(type);
2369 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
2372 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
2375 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
2377 ice_free(hw, buf);
2382 * ice_get_num_per_func - determine number of resources per PF
2383 * @hw: pointer to the HW structure
2390 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
2395 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
2405 * ice_print_led_caps - print LED capabilities
2406 * @hw: pointer to the ice_hw instance
2412 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2418 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %u\n", prefix,
2419 caps->led_pin_num);
2421 ice_info(hw, "%s: led_pin_num = %u\n", prefix,
2422 caps->led_pin_num);
2425 if (!caps->led[i])
2429 ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = %u\n",
2430 prefix, i, caps->led[i]);
2432 ice_info(hw, "%s: led[%u] = %u\n", prefix, i,
2433 caps->led[i]);
2438 * ice_print_sdp_caps - print SDP capabilities
2439 * @hw: pointer to the ice_hw instance
2445 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2451 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %u\n", prefix,
2452 caps->sdp_pin_num);
2454 ice_info(hw, "%s: sdp_pin_num = %u\n", prefix,
2455 caps->sdp_pin_num);
2458 if (!caps->sdp[i])
2462 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = %u\n",
2463 prefix, i, caps->sdp[i]);
2465 ice_info(hw, "%s: sdp[%u] = %u\n", prefix,
2466 i, caps->sdp[i]);
2471 * ice_parse_common_caps - parse common device/function capabilities
2472 * @hw: pointer to the HW struct
2484 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
2487 u32 logical_id = LE32_TO_CPU(elem->logical_id);
2488 u32 phys_id = LE32_TO_CPU(elem->phys_id);
2489 u32 number = LE32_TO_CPU(elem->number);
2490 u16 cap = LE16_TO_CPU(elem->cap);
2495 caps->switching_mode = number;
2496 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %u\n", prefix,
2497 caps->switching_mode);
2500 caps->mgmt_mode = number;
2501 caps->mgmt_protocols_mctp = logical_id;
2502 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %u\n", prefix,
2503 caps->mgmt_mode);
2504 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %u\n", prefix,
2505 caps->mgmt_protocols_mctp);
2508 caps->os2bmc = number;
2509 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %u\n", prefix, caps->os2bmc);
2512 caps->valid_functions = number;
2513 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = 0x%x\n", prefix,
2514 caps->valid_functions);
2517 caps->sr_iov_1_1 = (number == 1);
2518 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %u\n", prefix,
2519 caps->sr_iov_1_1);
2522 caps->vmdq = (number == 1);
2523 ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %u\n", prefix, caps->vmdq);
2526 caps->evb_802_1_qbg = (number == 1);
2527 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %u\n", prefix, number);
2530 caps->evb_802_1_qbh = (number == 1);
2531 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %u\n", prefix, number);
2534 caps->dcb = (number == 1);
2535 caps->active_tc_bitmap = logical_id;
2536 caps->maxtc = phys_id;
2537 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %u\n", prefix, caps->dcb);
2538 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = 0x%x\n", prefix,
2539 caps->active_tc_bitmap);
2540 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %u\n", prefix, caps->maxtc);
2543 caps->iscsi = (number == 1);
2544 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %u\n", prefix, caps->iscsi);
2547 caps->rss_table_size = number;
2548 caps->rss_table_entry_width = logical_id;
2549 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %u\n", prefix,
2550 caps->rss_table_size);
2551 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %u\n", prefix,
2552 caps->rss_table_entry_width);
2555 caps->num_rxq = number;
2556 caps->rxq_first_id = phys_id;
2557 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %u\n", prefix,
2558 caps->num_rxq);
2559 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %u\n", prefix,
2560 caps->rxq_first_id);
2563 caps->num_txq = number;
2564 caps->txq_first_id = phys_id;
2565 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %u\n", prefix,
2566 caps->num_txq);
2567 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %u\n", prefix,
2568 caps->txq_first_id);
2571 caps->num_msix_vectors = number;
2572 caps->msix_vector_first_id = phys_id;
2573 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %u\n", prefix,
2574 caps->num_msix_vectors);
2575 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %u\n", prefix,
2576 caps->msix_vector_first_id);
2579 caps->sec_rev_disabled =
2582 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
2583 caps->sec_rev_disabled);
2584 caps->update_disabled =
2587 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
2588 caps->update_disabled);
2589 caps->nvm_unified_update =
2592 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2593 caps->nvm_unified_update);
2594 caps->netlist_auth =
2597 ice_debug(hw, ICE_DBG_INIT, "%s: netlist_auth = %d\n", prefix,
2598 caps->netlist_auth);
2601 caps->mgmt_cem = (number == 1);
2602 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %u\n", prefix,
2603 caps->mgmt_cem);
2606 caps->iwarp = (number == 1);
2607 ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %u\n", prefix, caps->iwarp);
2610 caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
2611 ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
2612 prefix, caps->roce_lag);
2616 caps->led[phys_id] = true;
2617 caps->led_pin_num++;
2618 ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = 1\n", prefix, phys_id);
2623 caps->sdp[phys_id] = true;
2624 caps->sdp_pin_num++;
2625 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = 1\n", prefix, phys_id);
2629 caps->wr_csr_prot = number;
2630 caps->wr_csr_prot |= (u64)logical_id << 32;
2631 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2632 (unsigned long long)caps->wr_csr_prot);
2635 caps->num_wol_proxy_fltr = number;
2636 caps->wol_proxy_vsi_seid = logical_id;
2637 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2638 caps->acpi_prog_mthd = !!(phys_id &
2640 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2641 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %u\n", prefix,
2642 caps->num_wol_proxy_fltr);
2643 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %u\n", prefix,
2644 caps->wol_proxy_vsi_seid);
2645 ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %u\n",
2646 prefix, caps->apm_wol_support);
2649 caps->max_mtu = number;
2650 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %u\n",
2651 prefix, caps->max_mtu);
2654 caps->pcie_reset_avoidance = (number > 0);
2655 ice_debug(hw, ICE_DBG_INIT,
2657 caps->pcie_reset_avoidance);
2660 caps->reset_restrict_support = (number == 1);
2661 ice_debug(hw, ICE_DBG_INIT,
2663 caps->reset_restrict_support);
2670 u8 index = (u8)(cap - ICE_AQC_CAPS_EXT_TOPO_DEV_IMG0);
2672 caps->ext_topo_dev_img_ver_high[index] = number;
2673 caps->ext_topo_dev_img_ver_low[index] = logical_id;
2674 caps->ext_topo_dev_img_part_num[index] =
2677 caps->ext_topo_dev_img_load_en[index] =
2679 caps->ext_topo_dev_img_prog_en[index] =
2681 caps->ext_topo_dev_img_ver_schema[index] =
2683 ice_debug(hw, ICE_DBG_INIT,
2686 caps->ext_topo_dev_img_ver_high[index]);
2687 ice_debug(hw, ICE_DBG_INIT,
2690 caps->ext_topo_dev_img_ver_low[index]);
2691 ice_debug(hw, ICE_DBG_INIT,
2694 caps->ext_topo_dev_img_part_num[index]);
2695 ice_debug(hw, ICE_DBG_INIT,
2698 caps->ext_topo_dev_img_load_en[index]);
2699 ice_debug(hw, ICE_DBG_INIT,
2702 caps->ext_topo_dev_img_prog_en[index]);
2703 ice_debug(hw, ICE_DBG_INIT,
2706 caps->ext_topo_dev_img_ver_schema[index]);
2710 caps->tx_sched_topo_comp_mode_en = (number == 1);
2713 caps->dyn_flattening_en = (number == 1);
2714 ice_debug(hw, ICE_DBG_INIT, "%s: dyn_flattening_en = %d\n",
2715 prefix, caps->dyn_flattening_en);
2718 caps->orom_recovery_update = (number == 1);
2719 ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
2720 prefix, caps->orom_recovery_update);
2723 caps->next_cluster_id_support = (number == 1);
2724 ice_debug(hw, ICE_DBG_INIT, "%s: next_cluster_id_support = %d\n",
2725 prefix, caps->next_cluster_id_support);
2736 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2737 * @hw: pointer to the HW structure
2740 * Re-calculate the capabilities that are dependent on the number of physical
2745 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2750 if (hw->dev_caps.num_funcs > 4) {
2752 caps->maxtc = 4;
2753 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %u (based on #ports)\n",
2754 caps->maxtc);
2755 if (caps->iwarp) {
2756 ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
2757 caps->iwarp = 0;
2763 if (caps == &hw->dev_caps.common_cap)
2764 ice_info(hw, "RDMA functionality is not available with the current device configuration.\n");
2769 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2770 * @hw: pointer to the HW struct
2777 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2780 u32 number = LE32_TO_CPU(cap->number);
2781 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2783 func_p->num_allocd_vfs = number;
2784 func_p->vf_base_id = logical_id;
2785 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %u\n",
2786 func_p->num_allocd_vfs);
2787 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %u\n",
2788 func_p->vf_base_id);
2792 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2793 * @hw: pointer to the HW struct
2800 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2803 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2804 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %u\n",
2805 LE32_TO_CPU(cap->number));
2806 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %u\n",
2807 func_p->guar_num_vsi);
2811 * ice_parse_func_caps - Parse function capabilities
2812 * @hw: pointer to the HW struct
2825 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2839 found = ice_parse_common_caps(hw, &func_p->common_cap,
2844 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2847 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2852 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2858 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2859 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2861 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2865 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2866 * @hw: pointer to the HW struct
2873 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2876 u32 number = LE32_TO_CPU(cap->number);
2878 dev_p->num_funcs = ice_hweight32(number);
2879 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %u\n",
2880 dev_p->num_funcs);
2885 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2886 * @hw: pointer to the HW struct
2893 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2896 u32 number = LE32_TO_CPU(cap->number);
2898 dev_p->num_vfs_exposed = number;
2899 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %u\n",
2900 dev_p->num_vfs_exposed);
2904 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2905 * @hw: pointer to the HW struct
2912 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2915 u32 number = LE32_TO_CPU(cap->number);
2917 dev_p->num_vsi_allocd_to_host = number;
2918 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %u\n",
2919 dev_p->num_vsi_allocd_to_host);
2923 * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
2924 * @hw: pointer to the HW struct
2931 ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2934 dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
2935 dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
2937 ice_info(hw, "PF is configured in %s mode with IP instance ID %u\n",
2938 (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
2939 "primary" : "secondary", dev_p->nac_topo.id);
2941 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
2942 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
2943 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
2944 !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
2945 ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %u\n",
2946 dev_p->nac_topo.id);
2950 * ice_parse_sensor_reading_cap - Parse ICE_AQC_CAPS_SENSOR_READING cap
2951 * @hw: pointer to the HW struct
2959 ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2962 dev_p->supported_sensors = LE32_TO_CPU(cap->number);
2964 ice_debug(hw, ICE_DBG_INIT,
2966 dev_p->supported_sensors);
2970 * ice_parse_dev_caps - Parse device capabilities
2971 * @hw: pointer to the HW struct
2984 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2998 found = ice_parse_common_caps(hw, &dev_p->common_cap,
3003 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
3006 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
3009 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
3012 ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
3015 ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
3020 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%u]: 0x%x\n",
3026 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
3027 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
3029 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
3034 * @hw: pointer to the hw struct
3040 ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
3048 if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
3063 * @hw: pointer to the hw struct
3066 * @node_handle: output parameter if node found - optional
3070 * If node_handle is non-NULL it will be modified on function exit. It is only
3071 * valid if the function returns zero, and should be ignored on any non-zero
3078 ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
3094 status = ice_aq_get_netlist_node(hw, &cmd,
3108 * ice_aq_list_caps - query function/device capabilities
3109 * @hw: pointer to the HW struct
3127 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
3141 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3144 *cap_count = LE32_TO_CPU(cmd->count);
3150 * ice_discover_dev_caps - Read and extract device capabilities
3151 * @hw: pointer to the hardware structure
3158 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
3164 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
3174 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3177 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
3178 ice_free(hw, cbuf);
3184 * ice_discover_func_caps - Read and extract function capabilities
3185 * @hw: pointer to the hardware structure
3192 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
3198 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
3208 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
3211 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
3212 ice_free(hw, cbuf);
3218 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
3219 * @hw: pointer to the hardware structure
3221 void ice_set_safe_mode_caps(struct ice_hw *hw)
3223 struct ice_hw_func_caps *func_caps = &hw->func_caps;
3224 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
3229 cached_caps = func_caps->common_cap;
3235 func_caps->common_cap.name = cached_caps.name
3246 func_caps->common_cap.num_rxq = 1;
3247 func_caps->common_cap.num_txq = 1;
3250 func_caps->common_cap.num_msix_vectors = 2;
3251 func_caps->guar_num_vsi = 1;
3254 cached_caps = dev_caps->common_cap;
3255 num_funcs = dev_caps->num_funcs;
3261 dev_caps->common_cap.name = cached_caps.name
3270 dev_caps->num_funcs = num_funcs;
3273 dev_caps->common_cap.num_rxq = num_funcs;
3274 dev_caps->common_cap.num_txq = num_funcs;
3277 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
3281 * ice_get_caps - get info about the HW
3282 * @hw: pointer to the hardware structure
3284 int ice_get_caps(struct ice_hw *hw)
3288 status = ice_discover_dev_caps(hw, &hw->dev_caps);
3292 return ice_discover_func_caps(hw, &hw->func_caps);
3296 * ice_aq_manage_mac_write - manage MAC address write command
3297 * @hw: pointer to the HW struct
3305 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
3314 cmd->flags = flags;
3315 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
3317 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3322 * @hw: pointer to the HW struct
3326 static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
3333 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
3337 * ice_clear_pxe_mode - clear pxe operations mode
3338 * @hw: pointer to the HW struct
3341 * like descriptor fetch/write-back mode.
3343 void ice_clear_pxe_mode(struct ice_hw *hw)
3345 if (ice_check_sq_alive(hw, &hw->adminq))
3346 ice_aq_clear_pxe_mode(hw);
3350 * ice_aq_set_port_params - set physical port parameters
3366 struct ice_hw *hw = pi->hw;
3373 cmd->lb_mode = pi->loopback_mode |
3375 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
3382 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
3384 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3389 * @hw: pointer to the HW struct
3394 bool ice_is_100m_speed_supported(struct ice_hw *hw)
3396 switch (hw->device_id) {
3408 * ice_get_link_speed_based_on_phy_type - returns link speed
3559 * [ice_aqc_get_link_status->link_speed]. Caller can pass in
3597 * @hw: pointer to the HW struct
3608 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
3617 /* Ensure that only valid bits of cfg->caps can be turned on. */
3618 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
3619 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
3620 cfg->caps);
3622 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
3626 desc.params.set_phy.lport_num = pi->lport;
3629 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
3630 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
3631 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
3632 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
3633 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
3634 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
3635 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
3636 cfg->low_power_ctrl_an);
3637 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
3638 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
3639 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
3640 cfg->link_fec_opt);
3642 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
3644 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3648 pi->phy.curr_user_phy_cfg = *cfg;
3654 * ice_update_link_info - update status of the HW network link
3665 li = &pi->phy.link_info;
3671 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
3673 struct ice_hw *hw;
3675 hw = pi->hw;
3677 ice_malloc(hw, sizeof(*pcaps));
3685 ice_memcpy(li->module_type, &pcaps->module_type,
3686 sizeof(li->module_type),
3689 ice_free(hw, pcaps);
3713 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
3716 pi->phy.curr_user_speed_req =
3720 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
3779 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
3800 ice_malloc(pi->hw, sizeof(*pcaps));
3809 ice_free(pi->hw, pcaps);
3813 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
3814 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
3816 ice_free(pi->hw, pcaps);
3834 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
3838 cfg->caps |= pause_mask;
3860 struct ice_hw *hw;
3867 hw = pi->hw;
3870 ice_malloc(hw, sizeof(*pcaps));
3886 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3895 if (cfg.caps != pcaps->caps) {
3902 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3927 ice_free(hw, pcaps);
3955 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3956 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3957 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3958 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3959 phy_caps->eee_cap != phy_cfg->eee_cap ||
3960 phy_caps->eeer_value != phy_cfg->eeer_value ||
3961 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3968 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3985 cfg->phy_type_low = caps->phy_type_low;
3986 cfg->phy_type_high = caps->phy_type_high;
3987 cfg->caps = caps->caps;
3988 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3989 cfg->eee_cap = caps->eee_cap;
3990 cfg->eeer_value = caps->eeer_value;
3991 cfg->link_fec_opt = caps->link_fec_options;
3992 cfg->module_compliance_enforcement =
3993 caps->module_compliance_enforcement;
3997 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
4007 struct ice_hw *hw;
4013 hw = pi->hw;
4016 ice_malloc(hw, sizeof(*pcaps));
4021 (ice_fw_supports_report_dflt_cfg(hw) ?
4028 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
4029 cfg->link_fec_opt = pcaps->link_fec_options;
4033 /* Clear RS bits, and AND BASE-R ability
4036 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
4038 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
4042 /* Clear BASE-R bits, and AND RS ability
4045 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
4046 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
4051 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
4055 if (!ice_fw_supports_fec_dis_auto(hw)) {
4059 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_DIS;
4060 /* fall-through */
4063 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
4064 cfg->link_fec_opt |= pcaps->link_fec_options;
4071 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw) &&
4072 !ice_fw_supports_report_dflt_cfg(pi->hw)) {
4080 cfg->link_fec_opt = tlv.fec_options;
4084 ice_free(hw, pcaps);
4090 * ice_get_link_status - get status of the HW network link
4106 phy_info = &pi->phy;
4108 if (phy_info->get_link_info) {
4112 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
4116 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
4127 * Sets up the link and restarts the Auto-Negotiation over the link.
4141 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
4142 cmd->lport_num = pi->lport;
4144 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
4146 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
4148 status = ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
4153 pi->phy.curr_user_phy_cfg.caps |= ICE_AQC_PHY_EN_LINK;
4155 pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
4162 * @hw: pointer to the HW struct
4170 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
4180 cmd->lport_num = port_num;
4182 cmd->event_mask = CPU_TO_LE16(mask);
4183 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4188 * @hw: pointer to the HW struct
4195 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
4204 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
4206 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4212 * @is_orig_mode: is this LED set to original mode (by the net-list)
4222 struct ice_hw *hw = pi->hw;
4230 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
4232 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
4234 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4239 * @hw: pointer to the HW struct
4246 * @length: 1-16 for read, 1 for write.
4253 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
4267 cmd->lport_num = (u8)(lport & 0xff);
4268 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
4269 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
4274 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
4275 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
4277 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
4279 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
4285 * @hw: pointer to the hardware structure
4293 ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
4304 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4307 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4312 * @hw: pointer to the hardware structure
4322 ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
4340 ice_memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params),
4342 cmd->start_address = CPU_TO_LE32(start_address);
4344 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
4348 ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
4398 return -1;
4404 * @hw: pointer to the hardware structure
4411 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
4422 vsi_handle = params->vsi_handle;
4423 lut = params->lut;
4424 lut_size = ice_lut_type_to_size(params->lut_type);
4425 lut_type = params->lut_type & ICE_LUT_TYPE_MASK;
4428 glob_lut_idx = params->global_lut_id;
4430 if (!lut || !lut_size || !ice_is_vsi_valid(hw, vsi_handle))
4433 if (lut_size > params->lut_size)
4436 if (set && lut_size != params->lut_size)
4439 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
4448 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4459 cmd_resp->flags = CPU_TO_LE16(flags);
4460 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
4461 params->lut_size = LE16_TO_CPU(desc.datalen);
4467 * @hw: pointer to the hardware structure
4473 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
4475 return __ice_aq_get_set_rss_lut(hw, get_params, false);
4480 * @hw: pointer to the hardware structure
4486 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
4488 return __ice_aq_get_set_rss_lut(hw, set_params, true);
4493 * @hw: pointer to the HW struct
4500 static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
4517 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
4522 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
4527 * @hw: pointer to the HW struct
4534 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
4537 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
4540 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4546 * @hw: pointer to the HW struct
4553 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
4556 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
4559 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
4565 * @hw: pointer to the hardware structure
4585 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4594 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4607 sum_size += ice_struct_size(list, txqs, list->num_txqs);
4608 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
4609 list->num_txqs);
4617 cmd->num_qgrps = num_qgrps;
4619 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4624 * @hw: pointer to the hardware structure
4635 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
4646 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4657 cmd->num_entries = num_qgrps;
4659 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
4664 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
4665 cmd->vmvf_and_timeout |=
4669 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
4671 cmd->vmvf_and_timeout |=
4672 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
4681 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
4692 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
4695 if ((item->num_qs % 2) == 0)
4707 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
4710 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
4711 vmvf_num, hw->adminq.sq_last_status);
4713 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
4715 hw->adminq.sq_last_status);
4722 * @hw: pointer to the hardware structure
4727 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
4728 * @timeout: timeout in units of 100 usec (valid values 0-50)
4730 * @buf: struct containing src/dest TEID and per-queue info
4738 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
4740 u8 timeout, u32 *blocked_cgds,
4752 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
4764 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
4767 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
4770 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
4773 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
4775 cmd->num_qs = num_qs;
4776 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
4779 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
4782 *txqs_moved = cmd->num_qs;
4784 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
4786 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
4793 * @hw: pointer to the hardware structure
4802 ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
4811 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
4824 u16 num_qsets = LE16_TO_CPU(list->num_qsets);
4827 list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
4836 cmd->num_qset_grps = num_qset_grps;
4838 return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
4844 * ice_write_byte - write a byte to a packed context structure
4857 from = src_ctx + ce_info->offset;
4860 shift_width = ce_info->lsb % 8;
4861 mask = (u8)(BIT(ce_info->width) - 1);
4871 dest = dest_ctx + (ce_info->lsb / 8);
4883 * ice_write_word - write a word to a packed context structure
4897 from = src_ctx + ce_info->offset;
4900 shift_width = ce_info->lsb % 8;
4901 mask = BIT(ce_info->width) - 1;
4914 dest = dest_ctx + (ce_info->lsb / 8);
4926 * ice_write_dword - write a dword to a packed context structure
4940 from = src_ctx + ce_info->offset;
4943 shift_width = ce_info->lsb % 8;
4949 if (ce_info->width < 32)
4950 mask = BIT(ce_info->width) - 1;
4965 dest = dest_ctx + (ce_info->lsb / 8);
4977 * ice_write_qword - write a qword to a packed context structure
4991 from = src_ctx + ce_info->offset;
4994 shift_width = ce_info->lsb % 8;
5000 if (ce_info->width < 64)
5001 mask = BIT_ULL(ce_info->width) - 1;
5016 dest = dest_ctx + (ce_info->lsb / 8);
5028 * ice_set_ctx - set context bits in packed structure
5029 * @hw: pointer to the hardware structure
5030 * @src_ctx: pointer to a generic non-packed context structure
5035 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
5046 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
5073 * @hw: pointer to the hardware structure
5085 * Get internal FW/HW data (0xFF08) for debug purposes.
5088 ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
5104 cmd->cluster_id = CPU_TO_LE16(cluster_id);
5105 cmd->table_id = CPU_TO_LE16(table_id);
5106 cmd->idx = CPU_TO_LE32(start);
5108 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5114 *ret_next_cluster = LE16_TO_CPU(cmd->cluster_id);
5116 *ret_next_table = LE16_TO_CPU(cmd->table_id);
5118 *ret_next_index = LE32_TO_CPU(cmd->idx);
5125 * ice_read_byte - read context byte into struct
5138 shift_width = ce_info->lsb % 8;
5139 mask = (u8)(BIT(ce_info->width) - 1);
5145 src = src_ctx + (ce_info->lsb / 8);
5154 target = dest_ctx + ce_info->offset;
5161 * ice_read_word - read context word into struct
5175 shift_width = ce_info->lsb % 8;
5176 mask = BIT(ce_info->width) - 1;
5182 src = src_ctx + (ce_info->lsb / 8);
5197 target = dest_ctx + ce_info->offset;
5204 * ice_read_dword - read context dword into struct
5218 shift_width = ce_info->lsb % 8;
5224 if (ce_info->width < 32)
5225 mask = BIT(ce_info->width) - 1;
5233 src = src_ctx + (ce_info->lsb / 8);
5248 target = dest_ctx + ce_info->offset;
5255 * ice_read_qword - read context qword into struct
5269 shift_width = ce_info->lsb % 8;
5275 if (ce_info->width < 64)
5276 mask = BIT_ULL(ce_info->width) - 1;
5284 src = src_ctx + (ce_info->lsb / 8);
5299 target = dest_ctx + ce_info->offset;
5306 * ice_get_ctx - extract context bits from a packed structure
5308 * @dest_ctx: pointer to a generic non-packed context structure
5340 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
5341 * @hw: pointer to the HW struct
5347 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
5352 vsi = ice_get_vsi_ctx(hw, vsi_handle);
5355 if (q_handle >= vsi->num_lan_q_entries[tc])
5357 if (!vsi->lan_q_ctx[tc])
5359 q_ctx = vsi->lan_q_ctx[tc];
5384 struct ice_hw *hw;
5387 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5390 if (num_qgrps > 1 || buf->num_txqs > 1)
5393 hw = pi->hw;
5395 if (!ice_is_vsi_valid(hw, vsi_handle))
5398 ice_acquire_lock(&pi->sched_lock);
5400 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
5402 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
5416 buf->parent_teid = parent->info.node_teid;
5417 node.parent_teid = parent->info.node_teid;
5420 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
5421 * - 0 priority among siblings, indicated by Bit 1-3.
5422 * - WFQ, indicated by Bit 4.
5423 * - 0 Adjustment value is used in PSM credit update flow, indicated by
5424 * Bit 5-6.
5425 * - Bit 7 is reserved.
5429 buf->txqs[0].info.valid_sections =
5432 buf->txqs[0].info.generic = 0;
5433 buf->txqs[0].info.cir_bw.bw_profile_idx =
5435 buf->txqs[0].info.cir_bw.bw_alloc =
5437 buf->txqs[0].info.eir_bw.bw_profile_idx =
5439 buf->txqs[0].info.eir_bw.bw_alloc =
5443 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
5445 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
5446 LE16_TO_CPU(buf->txqs[0].txq_id),
5447 hw->adminq.sq_last_status);
5451 node.node_teid = buf->txqs[0].q_teid;
5453 q_ctx->q_handle = q_handle;
5454 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
5457 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node, NULL);
5462 ice_release_lock(&pi->sched_lock);
5490 struct ice_hw *hw;
5493 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5496 hw = pi->hw;
5504 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
5510 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
5514 ice_acquire_lock(&pi->sched_lock);
5519 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
5522 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
5524 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
5528 if (q_ctx->q_handle != q_handles[i]) {
5529 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
5530 q_ctx->q_handle, q_handles[i]);
5533 qg_list->parent_teid = node->info.parent_teid;
5534 qg_list->num_qs = 1;
5535 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
5536 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
5542 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
5544 ice_release_lock(&pi->sched_lock);
5545 ice_free(hw, qg_list);
5550 * ice_cfg_vsi_qs - configure the new/existing VSI queues
5566 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5569 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
5572 ice_acquire_lock(&pi->sched_lock);
5585 ice_release_lock(&pi->sched_lock);
5590 * ice_cfg_vsi_lan - configure VSI LAN queues
5607 * ice_cfg_vsi_rdma - configure the VSI RDMA queues
5641 struct ice_hw *hw;
5645 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5647 hw = pi->hw;
5649 if (!ice_is_vsi_valid(hw, vsi_handle))
5653 buf = (struct ice_aqc_add_rdma_qset_data *)ice_malloc(hw, buf_size);
5656 ice_acquire_lock(&pi->sched_lock);
5664 buf->parent_teid = parent->info.node_teid;
5665 node.parent_teid = parent->info.node_teid;
5667 buf->num_qsets = CPU_TO_LE16(num_qsets);
5669 buf->rdma_qsets[i].tx_qset_id = CPU_TO_LE16(rdma_qset[i]);
5670 buf->rdma_qsets[i].info.valid_sections =
5673 buf->rdma_qsets[i].info.generic = 0;
5674 buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
5676 buf->rdma_qsets[i].info.cir_bw.bw_alloc =
5678 buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
5680 buf->rdma_qsets[i].info.eir_bw.bw_alloc =
5683 status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
5685 ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
5690 node.node_teid = buf->rdma_qsets[i].qset_teid;
5691 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
5698 ice_release_lock(&pi->sched_lock);
5699 ice_free(hw, buf);
5704 * ice_dis_vsi_rdma_qset - free RDMA resources
5715 struct ice_hw *hw;
5720 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
5723 hw = pi->hw;
5726 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, qg_size);
5730 ice_acquire_lock(&pi->sched_lock);
5735 node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
5739 qg_list->parent_teid = node->info.parent_teid;
5740 qg_list->num_qs = 1;
5741 qg_list->q_id[0] =
5745 status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
5753 ice_release_lock(&pi->sched_lock);
5754 ice_free(hw, qg_list);
5760 * @hw: pointer to the HW struct
5769 ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
5782 cmd->sensor = sensor;
5783 cmd->format = format;
5785 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
5795 * ice_is_main_vsi - checks whether the VSI is main VSI
5796 * @hw: pointer to the HW struct
5802 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
5804 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
5808 * ice_replay_pre_init - replay pre initialization
5809 * @hw: pointer to the HW struct
5815 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
5821 ice_rm_sw_replay_rule_info(hw, sw);
5827 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
5828 &sw->recp_list[i].filt_replay_rules);
5829 ice_sched_replay_agg_vsi_preinit(hw);
5831 status = ice_sched_replay_root_node_bw(hw->port_info);
5835 return ice_sched_replay_tc_node_bw(hw->port_info);
5839 * ice_replay_vsi - replay VSI configuration
5840 * @hw: pointer to the HW struct
5846 int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
5848 struct ice_switch_info *sw = hw->switch_info;
5849 struct ice_port_info *pi = hw->port_info;
5852 if (!ice_is_vsi_valid(hw, vsi_handle))
5855 /* Replay pre-initialization if there is any */
5856 if (ice_is_main_vsi(hw, vsi_handle)) {
5857 status = ice_replay_pre_init(hw, sw);
5862 status = ice_replay_rss_cfg(hw, vsi_handle);
5866 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
5868 status = ice_replay_vsi_agg(hw, vsi_handle);
5873 * ice_replay_post - post replay configuration cleanup
5874 * @hw: pointer to the HW struct
5878 void ice_replay_post(struct ice_hw *hw)
5881 ice_rm_all_sw_replay_rule_info(hw);
5882 ice_sched_replay_agg(hw);
5886 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
5887 * @hw: ptr to the hardware info
5888 * @reg: offset of 64 bit HW register to read from
5894 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5897 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
5913 *cur_stat += new_data - *prev_stat;
5915 /* to manage the potential roll-over */
5916 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
5923 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
5924 * @hw: ptr to the hardware info
5925 * @reg: offset of HW register to read from
5931 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
5936 new_data = rd32(hw, reg);
5952 *cur_stat += new_data - *prev_stat;
5954 /* to manage the potential roll-over */
5955 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
5962 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
5963 * @hw: ptr to the hardware info
5984 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
5990 if (!ice_is_vsi_valid(hw, vsi_handle))
5993 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
5997 wr32(hw, GLV_REPC(vsi_num), 0);
6001 repc = rd32(hw, GLV_REPC(vsi_num));
6006 wr32(hw, GLV_REPC(vsi_num), 0);
6008 cur_stats->rx_no_desc += no_desc;
6009 cur_stats->rx_errors += error_cnt;
6014 * @hw: pointer to the hardware structure
6024 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
6034 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
6035 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
6036 cmd->dword0_value = CPU_TO_LE32(reg_val0);
6037 cmd->dword1_value = CPU_TO_LE32(reg_val1);
6039 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6046 * @hw: pointer to the hardware structure
6057 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
6070 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
6071 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
6073 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6076 *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
6079 *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
6087 * @hw: pointer to the HW structure.
6094 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
6106 cmd->flags = bios_mode;
6108 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6110 *reset_needed = (LE16_TO_CPU(cmd->flags) &
6118 * @hw: pointer to the HW structure.
6123 int ice_aq_alternate_clear(struct ice_hw *hw)
6130 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6136 * ice_sched_query_elem - query element information from HW
6137 * @hw: pointer to the HW struct
6141 * This function queries HW element information
6144 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
6152 buf->node_teid = CPU_TO_LE32(node_teid);
6153 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
6156 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
6161 * ice_get_fw_mode - returns FW mode
6162 * @hw: pointer to the HW struct
6164 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
6172 fw_mode = rd32(hw, GL_MNG_FWSM) & E800_GL_MNG_FWSM_FW_MODES_M;
6185 * @hw: pointer to the HW struct
6191 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
6193 struct ice_port_info *pi = hw->port_info;
6201 ret = ice_acquire_nvm(hw, ICE_RES_READ);
6205 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
6212 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6215 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6218 ice_release_nvm(hw);
6225 * @hw: pointer to the HW struct
6231 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
6233 struct ice_port_info *pi = hw->port_info;
6242 ret = ice_acquire_nvm(hw, ICE_RES_READ);
6247 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
6264 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
6278 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
6284 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6287 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
6291 ice_release_nvm(hw);
6298 * @hw: pointer to the hw struct
6300 * @bus_addr: 7-bit I2C bus address
6302 * @params: I2C parameters: bit [7] - Repeated start, bits [6:5] data offset size,
6303 * bit [4] - I2C address type, bits [3:0] - data size to read (0-16 bytes)
6310 ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6327 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6328 cmd->topo_addr = topo_addr;
6329 cmd->i2c_params = params;
6330 cmd->i2c_addr = addr;
6332 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6339 *data = resp->i2c_data[i];
6349 * @hw: pointer to the hw struct
6351 * @bus_addr: 7-bit I2C bus address
6353 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size to write (0-7 bytes)
6360 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
6377 cmd->i2c_bus_addr = CPU_TO_LE16(bus_addr);
6378 cmd->topo_addr = topo_addr;
6379 cmd->i2c_params = params;
6380 cmd->i2c_addr = addr;
6383 cmd->i2c_data[i] = *data;
6387 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6392 * @hw: pointer to the hw struct
6401 ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
6409 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6410 cmd->gpio_num = pin_idx;
6411 cmd->gpio_val = value ? 1 : 0;
6413 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6418 * @hw: pointer to the hw struct
6428 ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
6437 cmd->gpio_ctrl_handle = CPU_TO_LE16(gpio_ctrl_handle);
6438 cmd->gpio_num = pin_idx;
6440 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6444 *value = !!cmd->gpio_val;
6450 * @hw: pointer to the hardware structure
6457 static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
6459 if (hw->api_maj_ver == maj) {
6460 if (hw->api_min_ver > min)
6462 if (hw->api_min_ver == min && hw->api_patch >= patch)
6464 } else if (hw->api_maj_ver > maj) {
6473 * @hw: pointer to the hardware structure
6481 static bool ice_is_fw_min_ver(struct ice_hw *hw, u8 branch, u8 maj, u8 min,
6484 if (hw->fw_branch == branch) {
6485 if (hw->fw_maj_ver > maj)
6487 if (hw->fw_maj_ver == maj) {
6488 if (hw->fw_min_ver > min)
6490 if (hw->fw_min_ver == min && hw->fw_patch >= patch)
6500 * @hw: pointer to the hardware structure
6504 bool ice_fw_supports_link_override(struct ice_hw *hw)
6506 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
6523 struct ice_hw *hw = pi->hw;
6526 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
6529 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
6534 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
6538 status = ice_read_sr_word(hw, tlv_start, &buf);
6540 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6543 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
6544 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
6549 status = ice_read_sr_word(hw, offset, &buf);
6551 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
6554 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
6559 status = ice_read_sr_word(hw, (offset + i), &buf);
6561 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6565 ldo->phy_type_low |= ((u64)buf << (i * 16));
6572 status = ice_read_sr_word(hw, (offset + i), &buf);
6574 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
6578 ldo->phy_type_high |= ((u64)buf << (i * 16));
6585 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
6590 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
6591 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
6601 * @hw: pointer to the hardware structure
6606 bool ice_is_fw_health_report_supported(struct ice_hw *hw)
6608 if (hw->api_maj_ver > ICE_FW_API_HEALTH_REPORT_MAJ)
6611 if (hw->api_maj_ver == ICE_FW_API_HEALTH_REPORT_MAJ) {
6612 if (hw->api_min_ver > ICE_FW_API_HEALTH_REPORT_MIN)
6614 if (hw->api_min_ver == ICE_FW_API_HEALTH_REPORT_MIN &&
6615 hw->api_patch >= ICE_FW_API_HEALTH_REPORT_PATCH)
6623 * ice_aq_set_health_status_config - Configure FW health events
6624 * @hw: pointer to the HW struct
6629 * PF. The supported event types are: PF-specific, all PFs, and global
6632 ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
6643 cmd->event_source = event_source;
6645 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
6650 * @hw: pointer to the hw struct
6652 * @option_count: input - size of the buffer in port options structures,
6653 * output - number of returned port options
6665 ice_aq_get_port_options(struct ice_hw *hw,
6683 cmd->lport_num = lport;
6684 cmd->lport_num_valid = lport_valid;
6686 status = ice_aq_send_cmd(hw, &desc, options,
6692 *option_count = cmd->port_options_count & ICE_AQC_PORT_OPT_COUNT_M;
6693 ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
6694 *active_option_valid = cmd->port_options & ICE_AQC_PORT_OPT_VALID;
6696 *active_option_idx = cmd->port_options &
6698 if (*active_option_idx > (*option_count - 1))
6700 ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
6704 *pending_option_valid = cmd->pending_port_option_status &
6707 *pending_option_idx = cmd->pending_port_option_status &
6709 if (*pending_option_idx > (*option_count - 1))
6711 ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
6719 ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
6728 * @hw: pointer to the hw struct
6737 ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
6749 cmd->lport_num = lport;
6751 cmd->lport_num_valid = lport_valid;
6752 cmd->selected_port_option = new_option;
6754 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6758 * ice_aq_set_lldp_mib - Set the LLDP MIB
6759 * @hw: pointer to the HW struct
6761 * @buf: pointer to the caller-supplied buffer to store the MIB block
6768 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
6784 cmd->type = mib_type;
6785 cmd->length = CPU_TO_LE16(buf_size);
6787 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
6791 * ice_fw_supports_lldp_fltr_ctrl - check NVM version supports lldp_fltr_ctrl
6792 * @hw: pointer to HW struct
6794 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
6796 if (hw->mac_type != ICE_MAC_E810 && hw->mac_type != ICE_MAC_GENERIC)
6799 return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
6805 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
6806 * @hw: pointer to HW struct
6807 * @vsi_num: absolute HW index for VSI
6811 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
6821 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
6823 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
6825 cmd->vsi_num = CPU_TO_LE16(vsi_num);
6827 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6831 * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
6832 * @hw: pointer to HW struct
6834 int ice_lldp_execute_pending_mib(struct ice_hw *hw)
6840 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
6845 * @hw: pointer to the hardware structure
6849 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
6851 return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
6861 * value, but is indexed by [fls(speed) - 1]
6879 * ice_get_link_speed - get integer speed from table
6880 * @index: array index from fls(aq speed) - 1
6894 * @hw: pointer to the hardware structure
6898 bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
6900 if (ice_is_e830(hw))
6902 return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
6906 ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E82X,
6914 * @hw: pointer to the hardware structure
6918 bool ice_is_fw_auto_drop_supported(struct ice_hw *hw)
6920 if (hw->api_maj_ver >= ICE_FW_API_AUTO_DROP_MAJ &&
6921 hw->api_min_ver >= ICE_FW_API_AUTO_DROP_MIN)