Lines matching full-word "smu" in smu_cmn.c (amdgpu SWSMU common helpers)

#define smu_cmn_call_asic_func(intf, smu, args...)                         \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ?                      \
			     (smu)->ppt_funcs->intf(smu, ##args) :         \
			     -ENOTSUPP) :                                  \
			    -EINVAL)
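For orientation, a small usage sketch (hedged): the macro dispatches to an optional per-ASIC callback, mirroring how __smu_get_enabled_features() further down invokes get_enabled_mask.

	/* Hedged usage sketch: dispatch an optional per-ASIC callback.
	 * Expands to -ENOTSUPP when the ASIC does not implement the
	 * callback, or -EINVAL when no ppt_funcs table is installed.
	 */
	uint64_t mask = 0;
	int ret = smu_cmn_call_asic_func(get_enabled_mask, smu, &mask);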
static const char *smu_get_message_name(struct smu_context *smu,
		return "unknown smu message";

static void smu_cmn_read_arg(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	*arg = RREG32(smu->param_reg);
/* Redefine the SMU error codes here.
 *
 * These definitions are redundant and should be removed
 * when the SMU has exported a unified header file containing these
 * macros, which header file we can just include and use the SMU's
 * macros. At the moment, these error codes are defined by the SMU
 * FW team.
 */
/**
 * __smu_cmn_poll_stat -- poll for a status from the SMU
 * @smu: a pointer to SMU context
 *
 * Returns the status of the SMU, which could be,
 *    0, the SMU is busy with your command;
 *    1, execution status: success, execution result: success;
 * 0xFF, execution status: success, execution result: failure;
 * 0xFE, unknown command;
 * 0xFD, valid command, but bad (command) prerequisites;
 * 0xFC, the command was rejected as the SMU is busy;
 * 0xFB, "SMC_Result_DebugDataDumpEnd".
 *
 * The values here are not defined by macros, because I'd rather we
 * include a single header file which defines them, which is
 * maintained by the SMU FW team, so that we're impervious to firmware
 * changes.
 */
static u32 __smu_cmn_poll_stat(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
		reg = RREG32(smu->resp_reg);
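The listing elides the loop body. A minimal sketch of the polling pattern, assuming the usual udelay(1) granularity; the timeout budget derived from adev->usec_timeout and its scale factor are assumptions, not the authoritative values:

	/* Sketch: busy-poll the response register until it goes nonzero
	 * or the timeout budget runs out. A zero return means the SMU
	 * never responded, i.e. it is still busy with the command.
	 */
	static u32 poll_stat_sketch(struct smu_context *smu)
	{
		int timeout = smu->adev->usec_timeout * 20; /* scale is an assumption */
		u32 reg = 0;

		for (; timeout > 0; timeout--) {
			reg = RREG32(smu->resp_reg);
			if (reg != 0)		/* any nonzero value is a status */
				break;
			udelay(1);
		}

		return reg;
	}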
static void __smu_cmn_reg_print_error(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	const char *message = smu_get_message_name(smu, msg);

	msg_idx = RREG32(smu->msg_reg);
	prm = RREG32(smu->param_reg);
		"SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
	/* The SMU executed the command. It completed with a
	 * successful result.
	 */
	/* The SMU executed the command. It completed with an
	 * unsuccessful result.
	 */
		"SMU: unknown command: index:%d param:0x%08X message:%s",
		"SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
		"SMU: I'm very busy with your command: index:%d param:0x%08X message:%s",
		"SMU: I'm debugging!");
	if (amdgpu_device_bus_status_check(smu->adev)) {
		"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
		"SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
	/* The SMU is busy--still executing your command.
	 */
	/* Unknown command--ignored by the SMU.
	 */
	/* The SMU is busy with other commands. The client
	 * should retry in 10 us.
	 */
	/* Unknown or debug response from the SMU.
	 */
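Putting the comments above together with the status values documented at __smu_cmn_poll_stat(), the status-to-errno mapping can be sketched as plain, self-contained C. The constant names here are illustrative (the kernel has its own macros for these response values):

	#include <errno.h>
	#include <stdint.h>

	/* Illustrative names for the raw SMU response values. */
	enum {
		RESP_NONE           = 0x00, /* still busy with this command */
		RESP_OK             = 0x01, /* executed, result: success */
		RESP_CMD_FAIL       = 0xFF, /* executed, result: failure */
		RESP_CMD_UNKNOWN    = 0xFE, /* unknown command */
		RESP_CMD_BAD_PREREQ = 0xFD, /* valid command, bad prerequisites */
		RESP_BUSY_OTHER     = 0xFC, /* busy with another command */
	};

	static int reg2errno_sketch(uint32_t reg)
	{
		switch (reg) {
		case RESP_NONE:           return -ETIME;      /* we timed out waiting */
		case RESP_OK:             return 0;
		case RESP_CMD_FAIL:       return -EIO;
		case RESP_CMD_UNKNOWN:    return -EOPNOTSUPP;
		case RESP_CMD_BAD_PREREQ: return -EINVAL;
		case RESP_BUSY_OTHER:     return -EBUSY;      /* caller may retry */
		default:                  return -EREMOTEIO;  /* unknown/debug response */
		}
	}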
static void __smu_cmn_send_msg(struct smu_context *smu,
			       uint16_t msg,
			       uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->resp_reg, 0);	/* clear the old response first */
	WREG32(smu->param_reg, param);
	WREG32(smu->msg_reg, msg);	/* writing the message index last triggers execution */
}
static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu,
	return smu->message_map[msg].flags;

static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	flags = __smu_cmn_get_msg_flags(smu, msg);
		smu_get_message_name(smu, msg));
	resp = RREG32(smu->resp_reg);
		smu_get_message_name(smu, msg), resp);
static int __smu_cmn_send_debug_msg(struct smu_context *smu,
				    u32 msg, u32 param)
{
	struct amdgpu_device *adev = smu->adev;
	WREG32(smu->debug_param_reg, param);
	WREG32(smu->debug_msg_reg, msg);
	WREG32(smu->debug_resp_reg, 0);
	return 0;
}
/**
 * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
 * @smu: pointer to an SMU context
 * @msg_index: message index
 * @param: message parameter to send to the SMU
 *
 * Send a message to the SMU with the parameter passed. Do not wait
 * for status/result of the message, thus the "without_waiting".
 */
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;

	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev, "SMU is in a hung state; failed to send SMU message!\n");

	if (smu->smc_fw_state == SMU_FW_INIT) {
		smu->smc_fw_state = SMU_FW_RUNTIME;
		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);

	__smu_cmn_send_msg(smu, msg_index, param);
/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
		smu->smc_fw_state = SMU_FW_HANG;
	if (unlikely(smu->adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
		amdgpu_device_halt(smu->adev);
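Together, the two helpers above support a split send/wait pattern. A hedged sketch of a caller, with msg_index and param as hypothetical inputs; real callers serialize the pair under smu->message_lock, matching the locking visible in smu_cmn_send_smc_msg_with_param() below:

	/* Sketch: send a pre-translated message index, then wait separately. */
	int ret;

	mutex_lock(&smu->message_lock);
	ret = smu_cmn_send_msg_without_waiting(smu, msg_index, param);
	if (!ret)
		ret = smu_cmn_wait_for_response(smu);
	mutex_unlock(&smu->message_lock);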
/**
 * smu_cmn_send_smc_msg_with_param -- send a message with a parameter and wait
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return back a value from the SMU in
 * @read_arg pointer.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the standard log. A failed command can also return
 * -EREMOTEIO, indicating that the SMU returned back an
 * unknown or debug-only response.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;

	index = smu_cmn_to_asic_specific_index(smu,

	mutex_lock(&smu->message_lock);

	if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) {
		res = __smu_cmn_ras_filter_msg(smu, msg, &poll);

	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev, "SMU is in a hung state; failed to send SMU message!\n");
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		/* Ignore the initial SMU response register value */
		smu->smc_fw_state = SMU_FW_RUNTIME;

		reg = __smu_cmn_poll_stat(smu);
		res = __smu_cmn_reg2errno(smu, reg);
			__smu_cmn_reg_print_error(smu, reg, index, param, msg);

	__smu_cmn_send_msg(smu, (uint16_t) index, param);
	reg = __smu_cmn_poll_stat(smu);
	res = __smu_cmn_reg2errno(smu, reg);
		smu->smc_fw_state = SMU_FW_HANG;
		__smu_cmn_reg_print_error(smu, reg, index, param, msg);
		smu_cmn_read_arg(smu, read_arg);

	dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x, readval: 0x%08x\n",
		smu_get_message_name(smu, msg), index, param, reg, *read_arg);
	dev_dbg(adev->dev, "smu send message: %s(%d) param: 0x%08x, resp: 0x%08x\n",
		smu_get_message_name(smu, msg), index, param, reg);

	mutex_unlock(&smu->message_lock);
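A typical call site, sketched: query a value back from the firmware. SMU_MSG_GetSmuVersion is the same message smu_cmn_get_smc_version() uses further down; treat the surrounding scaffolding as illustrative:

	/* Sketch: send a message with no parameter and read the reply. */
	uint32_t smu_version = 0;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetSmuVersion,
					      0, &smu_version);
	if (ret)
		return ret;	/* -errno per __smu_cmn_reg2errno() */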
int smu_cmn_send_smc_msg(struct smu_context *smu,
	return smu_cmn_send_smc_msg_with_param(smu,

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
	return __smu_cmn_send_debug_msg(smu, msg, 0);

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
	return __smu_cmn_send_debug_msg(smu, msg, param);
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
		    !smu->message_map)
		msg_mapping = smu->message_map[index];
		if (amdgpu_sriov_vf(smu->adev) &&

		    !smu->clock_map)
		mapping = smu->clock_map[index];

		    !smu->feature_map)
		mapping = smu->feature_map[index];

		    !smu->table_map)
		mapping = smu->table_map[index];

		    !smu->pwr_src_map)
		mapping = smu->pwr_src_map[index];

		    !smu->workload_map)
		mapping = smu->workload_map[index];
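The function resolves a generic enum (message, clock, feature, table, power source, or workload) to an ASIC-specific index through per-ASIC lookup tables installed on the smu_context. A hedged sketch of what a message-map entry looks like; the field layout follows the cmn2asic mapping convention used by the per-ASIC ppt files, but names here are illustrative:

	/* Sketch: one message-map entry, as the per-ASIC code declares it. */
	struct msg_mapping_sketch {
		int      valid_mapping;	/* nonzero if this ASIC implements the message */
		int      map_to;	/* ASIC/firmware-specific message index */
		uint32_t flags;		/* e.g. valid-in-VF, RAS priority */
	};

	/* e.g.: [SMU_MSG_GetSmuVersion] = { 1, PPSMC_MSG_GetSmuVersion, 1 }, */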
int smu_cmn_feature_is_supported(struct smu_context *smu,
	struct smu_feature *feature = &smu->smu_feature;
	feature_id = smu_cmn_to_asic_specific_index(smu,

static int __smu_get_enabled_features(struct smu_context *smu,
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
int smu_cmn_feature_is_enabled(struct smu_context *smu,
	struct amdgpu_device *adev = smu->adev;
	if (__smu_get_enabled_features(smu, &enabled_features)) {
	feature_id = smu_cmn_to_asic_specific_index(smu,

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
	if (!smu_cmn_feature_is_enabled(smu, feature_id))
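A hedged usage sketch: callers gate clock-domain operations on the corresponding DPM feature bit. SMU_FEATURE_DPM_UCLK_BIT is one of the generic feature enums; the surrounding error handling is illustrative:

	/* Sketch: bail out if UCLK DPM is not enabled on this ASIC. */
	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
		return -EOPNOTSUPP;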
int smu_cmn_get_enabled_mask(struct smu_context *smu,
	index = smu_cmn_to_asic_specific_index(smu,
		ret = smu_cmn_send_smc_msg_with_param(smu,
		ret = smu_cmn_send_smc_msg_with_param(smu,
		ret = smu_cmn_send_smc_msg(smu,
		ret = smu_cmn_send_smc_msg(smu,
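The paired sends above reflect firmware that reports the feature mask as two 32-bit halves (a GetEnabledSmuFeaturesHigh/Low message pair), which the helper stitches into one 64-bit mask. A sketch, with the intermediate variables as assumptions:

	/* Sketch: combine the two 32-bit replies into one 64-bit mask. */
	uint32_t mask_lo = 0, mask_hi = 0;
	/* ...High message fills mask_hi, ...Low message fills mask_lo... */
	*feature_mask = ((uint64_t)mask_hi << 32) | mask_lo;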
int smu_cmn_feature_update_enable_state(struct smu_context *smu,
		ret = smu_cmn_send_smc_msg_with_param(smu,
		ret = smu_cmn_send_smc_msg_with_param(smu,
		ret = smu_cmn_send_smc_msg_with_param(smu,
		ret = smu_cmn_send_smc_msg_with_param(smu,

int smu_cmn_feature_set_enabled(struct smu_context *smu,
	feature_id = smu_cmn_to_asic_specific_index(smu,
	return smu_cmn_feature_update_enable_state(smu,
static const char *smu_get_feature_name(struct smu_context *smu,
	return "unknown smu feature";

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
	if (__smu_get_enabled_features(smu, &feature_mask))
	feature_index = smu_cmn_to_asic_specific_index(smu,
		smu_get_feature_name(smu, sort_feature[feature_index]),
int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
	ret = __smu_get_enabled_features(smu, &feature_mask);
		ret = smu_cmn_feature_update_enable_state(smu,
		ret = smu_cmn_feature_update_enable_state(smu,
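The two update calls above only touch bits that actually change. A sketch of the delta computation between the currently enabled mask and the requested one; variable names are illustrative:

	/* Sketch: derive which features to enable and which to disable. */
	uint64_t to_enable  = ~feature_mask & new_mask;  /* off now, wanted on  */
	uint64_t to_disable =  feature_mask & ~new_mask; /* on now,  wanted off */

	if (to_enable)
		ret = smu_cmn_feature_update_enable_state(smu, to_enable, true);
	if (!ret && to_disable)
		ret = smu_cmn_feature_update_enable_state(smu, to_disable, false);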
/**
 * smu_cmn_disable_all_features_with_exception -- disable all features,
 *                                                except the one specified
 * @smu: smu_context pointer
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
	skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
	return smu_cmn_feature_update_enable_state(smu,
int smu_cmn_get_smc_version(struct smu_context *smu,
	if (smu->smc_fw_if_version && smu->smc_fw_version)
			*if_version = smu->smc_fw_if_version;
			*smu_version = smu->smc_fw_version;
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		smu->smc_fw_if_version = *if_version;
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		smu->smc_fw_version = *smu_version;
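Note the caching: once fetched, the versions are stored in smu->smc_fw_if_version and smu->smc_fw_version, so later calls avoid the message round-trip. A hedged usage sketch:

	/* Sketch: fetch (possibly cached) interface and firmware versions. */
	uint32_t if_version = 0, smu_version = 0;
	int ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);

	if (!ret)
		dev_dbg(smu->adev->dev, "SMU fw 0x%08x, if 0x%08x\n",
			smu_version, if_version);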
int smu_cmn_update_table(struct smu_context *smu,
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int table_id = smu_cmn_to_asic_specific_index(smu,
	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
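The drv2smu flag picks the transfer direction between driver memory and the SMU-visible DRAM table. A sketch of how the truncated send above continues, with the parameter packing taken as an assumption about the current source:

	/* Sketch: choose direction and pack the table id into the parameter. */
	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);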
int smu_cmn_write_watermarks_table(struct smu_context *smu)
	void *watermarks_table = smu->smu_table.watermarks_table;
	return smu_cmn_update_table(smu,

int smu_cmn_write_pptable(struct smu_context *smu)
	void *pptable = smu->smu_table.driver_pptable;
	return smu_cmn_update_table(smu,

int smu_cmn_get_metrics_table(struct smu_context *smu,
	struct smu_table_context *smu_table = &smu->smu_table;
	ret = smu_cmn_update_table(smu,
		dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");

int smu_cmn_get_combo_pptable(struct smu_context *smu)
	void *pptable = smu->smu_table.combo_pptable;
	return smu_cmn_update_table(smu,
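These wrappers all funnel through smu_cmn_update_table(). A hedged usage sketch for the metrics path; SmuMetrics_t stands in for the per-ASIC metrics layout and is illustrative:

	/* Sketch: snapshot the metrics table into a caller-owned buffer.
	 * Passing false allows the cached copy to be reused if fresh enough.
	 */
	SmuMetrics_t metrics;
	int ret = smu_cmn_get_metrics_table(smu, &metrics, false);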
int smu_cmn_set_mp1_state(struct smu_context *smu,
	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");
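The message sent depends on the requested MP1 state. A sketch of the selection implied by the fragment above; the case list is an assumption about the full switch:

	/* Sketch: map the requested MP1 state to a prepare message. */
	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;	/* nothing to do */
	}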
void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
	workload_type = smu_cmn_to_asic_specific_index(smu,