Lines Matching full:smu
62 static int smu_force_smuclk_levels(struct smu_context *smu,
65 static int smu_handle_task(struct smu_context *smu,
68 static int smu_reset(struct smu_context *smu);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
75 static void smu_power_profile_mode_get(struct smu_context *smu,
77 static void smu_power_profile_mode_put(struct smu_context *smu,
83 struct smu_context *smu = handle; in smu_sys_get_pp_feature_mask() local
85 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_feature_mask()
88 return smu_get_pp_feature_mask(smu, buf); in smu_sys_get_pp_feature_mask()
94 struct smu_context *smu = handle; in smu_sys_set_pp_feature_mask() local
96 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_feature_mask()
99 return smu_set_pp_feature_mask(smu, new_mask); in smu_sys_set_pp_feature_mask()
102 int smu_set_residency_gfxoff(struct smu_context *smu, bool value) in smu_set_residency_gfxoff() argument
104 if (!smu->ppt_funcs->set_gfx_off_residency) in smu_set_residency_gfxoff()
107 return smu_set_gfx_off_residency(smu, value); in smu_set_residency_gfxoff()
110 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value) in smu_get_residency_gfxoff() argument
112 if (!smu->ppt_funcs->get_gfx_off_residency) in smu_get_residency_gfxoff()
115 return smu_get_gfx_off_residency(smu, value); in smu_get_residency_gfxoff()
118 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value) in smu_get_entrycount_gfxoff() argument
120 if (!smu->ppt_funcs->get_gfx_off_entrycount) in smu_get_entrycount_gfxoff()
123 return smu_get_gfx_off_entrycount(smu, value); in smu_get_entrycount_gfxoff()
126 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) in smu_get_status_gfxoff() argument
128 if (!smu->ppt_funcs->get_gfx_off_status) in smu_get_status_gfxoff()
131 *value = smu_get_gfx_off_status(smu); in smu_get_status_gfxoff()
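The gfxoff wrappers above (source lines 102-131) all share one guard-then-dispatch shape: return early when the ASIC's ppt_funcs table does not provide the hook, otherwise forward the call to the ASIC-specific implementation. Below is a minimal user-space sketch of that shape; the demo_* types and fake_residency() are invented stand-ins for the kernel structures, not driver code.

#include <errno.h>
#include <stdio.h>

struct demo_ppt_funcs {
        int (*get_gfx_off_residency)(void *ctx, unsigned int *value);
};

struct demo_smu_context {
        const struct demo_ppt_funcs *ppt_funcs;
};

/* Same shape as the wrappers above: bail out with -EINVAL when the hook is
 * missing, otherwise forward the call. */
static int demo_get_residency(struct demo_smu_context *smu, unsigned int *value)
{
        if (!smu->ppt_funcs->get_gfx_off_residency)
                return -EINVAL;

        return smu->ppt_funcs->get_gfx_off_residency(smu, value);
}

static int fake_residency(void *ctx, unsigned int *value)
{
        (void)ctx;
        *value = 42;    /* stand-in for a firmware-reported counter */
        return 0;
}

int main(void)
{
        const struct demo_ppt_funcs with_hook = { .get_gfx_off_residency = fake_residency };
        const struct demo_ppt_funcs no_hook = { 0 };
        struct demo_smu_context a = { .ppt_funcs = &with_hook };
        struct demo_smu_context b = { .ppt_funcs = &no_hook };
        unsigned int v = 0;
        int ret;

        ret = demo_get_residency(&a, &v);
        printf("with hook:    %d (v=%u)\n", ret, v);
        printf("without hook: %d\n", demo_get_residency(&b, &v));
        return 0;
}

The same guard appears, with different hooks, in most of the handle-based entry points listed further down.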
136 int smu_set_soft_freq_range(struct smu_context *smu, in smu_set_soft_freq_range() argument
143 if (smu->ppt_funcs->set_soft_freq_limited_range) in smu_set_soft_freq_range()
144 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, in smu_set_soft_freq_range()
153 int smu_get_dpm_freq_range(struct smu_context *smu, in smu_get_dpm_freq_range() argument
163 if (smu->ppt_funcs->get_dpm_ultimate_freq) in smu_get_dpm_freq_range()
164 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, in smu_get_dpm_freq_range()
172 int smu_set_gfx_power_up_by_imu(struct smu_context *smu) in smu_set_gfx_power_up_by_imu() argument
175 struct amdgpu_device *adev = smu->adev; in smu_set_gfx_power_up_by_imu()
177 if (smu->ppt_funcs->set_gfx_power_up_by_imu) { in smu_set_gfx_power_up_by_imu()
178 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu); in smu_set_gfx_power_up_by_imu()
187 struct smu_context *smu = handle; in smu_get_mclk() local
191 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, in smu_get_mclk()
201 struct smu_context *smu = handle; in smu_get_sclk() local
205 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, in smu_get_sclk()
213 static int smu_set_gfx_imu_enable(struct smu_context *smu) in smu_set_gfx_imu_enable() argument
215 struct amdgpu_device *adev = smu->adev; in smu_set_gfx_imu_enable()
220 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix) in smu_set_gfx_imu_enable()
223 return smu_set_gfx_power_up_by_imu(smu); in smu_set_gfx_imu_enable()
240 static int smu_dpm_set_vcn_enable(struct smu_context *smu, in smu_dpm_set_vcn_enable() argument
244 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_vcn_enable()
251 if (!is_vcn_enabled(smu->adev)) in smu_dpm_set_vcn_enable()
254 if (!smu->ppt_funcs->dpm_set_vcn_enable) in smu_dpm_set_vcn_enable()
260 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst); in smu_dpm_set_vcn_enable()
267 static int smu_dpm_set_jpeg_enable(struct smu_context *smu, in smu_dpm_set_jpeg_enable() argument
270 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_jpeg_enable()
274 if (!is_vcn_enabled(smu->adev)) in smu_dpm_set_jpeg_enable()
277 if (!smu->ppt_funcs->dpm_set_jpeg_enable) in smu_dpm_set_jpeg_enable()
283 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); in smu_dpm_set_jpeg_enable()
290 static int smu_dpm_set_vpe_enable(struct smu_context *smu, in smu_dpm_set_vpe_enable() argument
293 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_vpe_enable()
297 if (!smu->ppt_funcs->dpm_set_vpe_enable) in smu_dpm_set_vpe_enable()
303 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable); in smu_dpm_set_vpe_enable()
310 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, in smu_dpm_set_umsch_mm_enable() argument
313 struct smu_power_context *smu_power = &smu->smu_power; in smu_dpm_set_umsch_mm_enable()
317 if (!smu->adev->enable_umsch_mm) in smu_dpm_set_umsch_mm_enable()
320 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable) in smu_dpm_set_umsch_mm_enable()
326 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable); in smu_dpm_set_umsch_mm_enable()
333 static int smu_set_mall_enable(struct smu_context *smu) in smu_set_mall_enable() argument
337 if (!smu->ppt_funcs->set_mall_enable) in smu_set_mall_enable()
340 ret = smu->ppt_funcs->set_mall_enable(smu); in smu_set_mall_enable()
353 * This API uses no smu->mutex lock protection due to:
357 * Under this case, the smu->mutex lock protection is already enforced on
365 struct smu_context *smu = handle; in smu_dpm_set_power_gate() local
368 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) { in smu_dpm_set_power_gate()
369 dev_WARN(smu->adev->dev, in smu_dpm_set_power_gate()
370 "SMU uninitialized but power %s requested for %u!\n", in smu_dpm_set_power_gate()
382 ret = smu_dpm_set_vcn_enable(smu, !gate, inst); in smu_dpm_set_power_gate()
384 dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n", in smu_dpm_set_power_gate()
388 ret = smu_gfx_off_control(smu, gate); in smu_dpm_set_power_gate()
390 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n", in smu_dpm_set_power_gate()
394 ret = smu_powergate_sdma(smu, gate); in smu_dpm_set_power_gate()
396 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n", in smu_dpm_set_power_gate()
400 ret = smu_dpm_set_jpeg_enable(smu, !gate); in smu_dpm_set_power_gate()
402 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n", in smu_dpm_set_power_gate()
406 ret = smu_dpm_set_vpe_enable(smu, !gate); in smu_dpm_set_power_gate()
408 dev_err(smu->adev->dev, "Failed to power %s VPE!\n", in smu_dpm_set_power_gate()
412 dev_err(smu->adev->dev, "Unsupported block type!\n"); in smu_dpm_set_power_gate()
422 * @smu: smu_context pointer
427 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk) in smu_set_user_clk_dependencies() argument
429 if (smu->adev->in_suspend) in smu_set_user_clk_dependencies()
433 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
434 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
437 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
440 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
441 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK); in smu_set_user_clk_dependencies()
444 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK))) in smu_set_user_clk_dependencies()
447 smu->user_dpm_profile.clk_dependency = 0; in smu_set_user_clk_dependencies()
448 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK); in smu_set_user_clk_dependencies()
457 * @smu: smu_context pointer
462 static void smu_restore_dpm_user_profile(struct smu_context *smu) in smu_restore_dpm_user_profile() argument
464 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_restore_dpm_user_profile()
467 if (!smu->adev->in_suspend) in smu_restore_dpm_user_profile()
470 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_restore_dpm_user_profile()
474 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
477 if (smu->user_dpm_profile.power_limit) { in smu_restore_dpm_user_profile()
478 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit); in smu_restore_dpm_user_profile()
480 dev_err(smu->adev->dev, "Failed to set power limit value\n"); in smu_restore_dpm_user_profile()
489 * Iterate over smu clk type and force the saved user clk in smu_restore_dpm_user_profile()
492 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) && in smu_restore_dpm_user_profile()
493 smu->user_dpm_profile.clk_mask[clk_type]) { in smu_restore_dpm_user_profile()
494 ret = smu_force_smuclk_levels(smu, clk_type, in smu_restore_dpm_user_profile()
495 smu->user_dpm_profile.clk_mask[clk_type]); in smu_restore_dpm_user_profile()
497 dev_err(smu->adev->dev, in smu_restore_dpm_user_profile()
504 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || in smu_restore_dpm_user_profile()
505 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { in smu_restore_dpm_user_profile()
506 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); in smu_restore_dpm_user_profile()
508 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_restore_dpm_user_profile()
509 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_restore_dpm_user_profile()
510 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; in smu_restore_dpm_user_profile()
511 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n"); in smu_restore_dpm_user_profile()
514 if (smu->user_dpm_profile.fan_speed_pwm) { in smu_restore_dpm_user_profile()
515 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); in smu_restore_dpm_user_profile()
517 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); in smu_restore_dpm_user_profile()
520 if (smu->user_dpm_profile.fan_speed_rpm) { in smu_restore_dpm_user_profile()
521 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); in smu_restore_dpm_user_profile()
523 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); in smu_restore_dpm_user_profile()
528 if (smu->user_dpm_profile.user_od) { in smu_restore_dpm_user_profile()
529 if (smu->ppt_funcs->restore_user_od_settings) { in smu_restore_dpm_user_profile()
530 ret = smu->ppt_funcs->restore_user_od_settings(smu); in smu_restore_dpm_user_profile()
532 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n"); in smu_restore_dpm_user_profile()
537 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE; in smu_restore_dpm_user_profile()
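smu_restore_dpm_user_profile above replays the settings cached by the individual sysfs setters (power limit, per-clock masks, fan control, OD table); the SMU_DPM_USER_PROFILE_RESTORE flag, set at source line 474 and cleared at 537, keeps those same setters from re-caching values while the replay runs. A small user-space sketch of that flag-guarded cache-and-replay idea follows, with invented demo_* names and only the power-limit path shown.

#include <stdio.h>

#define DEMO_PROFILE_RESTORE  (1u << 0)   /* stand-in for SMU_DPM_USER_PROFILE_RESTORE */

struct demo_user_profile {
        unsigned int flags;
        unsigned int power_limit;       /* 0 means "user never set it" */
};

static unsigned int hw_power_limit;     /* pretend hardware state */

/* Normal setter: applies the value and, outside of a restore, caches it so it
 * can be replayed after suspend/resume or a reset. */
static void demo_set_power_limit(struct demo_user_profile *p, unsigned int limit)
{
        hw_power_limit = limit;
        if (!(p->flags & DEMO_PROFILE_RESTORE))
                p->power_limit = limit;
}

/* Replay path: raise the flag, reapply every cached setting, drop the flag. */
static void demo_restore_user_profile(struct demo_user_profile *p)
{
        p->flags |= DEMO_PROFILE_RESTORE;

        if (p->power_limit)
                demo_set_power_limit(p, p->power_limit);

        p->flags &= ~DEMO_PROFILE_RESTORE;
}

int main(void)
{
        struct demo_user_profile prof = { 0 };

        demo_set_power_limit(&prof, 220);       /* user request: applied and cached */
        hw_power_limit = 0;                     /* pretend the GPU was reset */
        demo_restore_user_profile(&prof);       /* replayed without re-caching */

        printf("hw=%u cached=%u\n", hw_power_limit, prof.power_limit);
        return 0;
}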
569 struct smu_context *smu = adev->powerplay.pp_handle; in is_support_cclk_dpm() local
571 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) in is_support_cclk_dpm()
581 struct smu_context *smu = handle; in smu_sys_get_pp_table() local
582 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_get_pp_table()
584 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pp_table()
602 struct smu_context *smu = handle; in smu_sys_set_pp_table() local
603 struct smu_table_context *smu_table = &smu->smu_table; in smu_sys_set_pp_table()
607 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_set_pp_table()
611 dev_err(smu->adev->dev, "pp table size not matched !\n"); in smu_sys_set_pp_table()
630 smu->uploading_custom_pp_table = true; in smu_sys_set_pp_table()
632 ret = smu_reset(smu); in smu_sys_set_pp_table()
634 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret); in smu_sys_set_pp_table()
636 smu->uploading_custom_pp_table = false; in smu_sys_set_pp_table()
641 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) in smu_get_driver_allowed_feature_mask() argument
643 struct smu_feature *feature = &smu->smu_feature; in smu_get_driver_allowed_feature_mask()
654 if (smu->adev->scpm_enabled) { in smu_get_driver_allowed_feature_mask()
661 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask, in smu_get_driver_allowed_feature_mask()
675 struct smu_context *smu = adev->powerplay.pp_handle; in smu_set_funcs() local
678 smu->od_enabled = true; in smu_set_funcs()
684 navi10_set_ppt_funcs(smu); in smu_set_funcs()
690 sienna_cichlid_set_ppt_funcs(smu); in smu_set_funcs()
694 renoir_set_ppt_funcs(smu); in smu_set_funcs()
697 vangogh_set_ppt_funcs(smu); in smu_set_funcs()
702 yellow_carp_set_ppt_funcs(smu); in smu_set_funcs()
706 smu_v13_0_4_set_ppt_funcs(smu); in smu_set_funcs()
709 smu_v13_0_5_set_ppt_funcs(smu); in smu_set_funcs()
712 cyan_skillfish_set_ppt_funcs(smu); in smu_set_funcs()
716 arcturus_set_ppt_funcs(smu); in smu_set_funcs()
718 smu->od_enabled = false; in smu_set_funcs()
721 aldebaran_set_ppt_funcs(smu); in smu_set_funcs()
723 smu->od_enabled = true; in smu_set_funcs()
727 smu_v13_0_0_set_ppt_funcs(smu); in smu_set_funcs()
732 smu_v13_0_6_set_ppt_funcs(smu); in smu_set_funcs()
734 smu->od_enabled = true; in smu_set_funcs()
737 smu_v13_0_7_set_ppt_funcs(smu); in smu_set_funcs()
742 smu_v14_0_0_set_ppt_funcs(smu); in smu_set_funcs()
746 smu_v14_0_2_set_ppt_funcs(smu); in smu_set_funcs()
758 struct smu_context *smu; in smu_early_init() local
761 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); in smu_early_init()
762 if (!smu) in smu_early_init()
765 smu->adev = adev; in smu_early_init()
766 smu->pm_enabled = !!amdgpu_dpm; in smu_early_init()
767 smu->is_apu = false; in smu_early_init()
768 smu->smu_baco.state = SMU_BACO_STATE_NONE; in smu_early_init()
769 smu->smu_baco.platform_support = false; in smu_early_init()
770 smu->smu_baco.maco_support = false; in smu_early_init()
771 smu->user_dpm_profile.fan_mode = -1; in smu_early_init()
772 smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN; in smu_early_init()
774 mutex_init(&smu->message_lock); in smu_early_init()
776 adev->powerplay.pp_handle = smu; in smu_early_init()
782 return smu_init_microcode(smu); in smu_early_init()
785 static int smu_set_default_dpm_table(struct smu_context *smu) in smu_set_default_dpm_table() argument
787 struct amdgpu_device *adev = smu->adev; in smu_set_default_dpm_table()
788 struct smu_power_context *smu_power = &smu->smu_power; in smu_set_default_dpm_table()
793 if (!smu->ppt_funcs->set_default_dpm_table) in smu_set_default_dpm_table()
805 ret = smu_dpm_set_vcn_enable(smu, true, i); in smu_set_default_dpm_table()
812 ret = smu_dpm_set_jpeg_enable(smu, true); in smu_set_default_dpm_table()
817 ret = smu->ppt_funcs->set_default_dpm_table(smu); in smu_set_default_dpm_table()
819 dev_err(smu->adev->dev, in smu_set_default_dpm_table()
823 smu_dpm_set_jpeg_enable(smu, !jpeg_gate); in smu_set_default_dpm_table()
827 smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i); in smu_set_default_dpm_table()
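smu_set_default_dpm_table above remembers the current VCN/JPEG gating state, un-gates the blocks so the firmware query can run, and restores the saved state afterwards regardless of the outcome. A compact sketch of that save/force/restore pattern; the demo_* names are made up and one VCN-like block stands in for the per-instance handling.

#include <stdio.h>
#include <stdbool.h>

static bool vcn_gated = true;   /* pretend power-gate state: true == gated (off) */

static void demo_set_vcn_enable(bool enable)
{
        vcn_gated = !enable;
}

static int demo_build_default_dpm_table(void)
{
        /* The real driver queries per-clock DPM levels from firmware here,
         * which only works while the block is un-gated. */
        printf("building table with VCN %s\n", vcn_gated ? "gated" : "running");
        return 0;
}

/* Remember the current gate state, force the block on, do the work, then put
 * the gate back exactly as it was. */
static int demo_set_default_dpm_table(void)
{
        bool was_gated = vcn_gated;
        int ret;

        demo_set_vcn_enable(true);
        ret = demo_build_default_dpm_table();
        demo_set_vcn_enable(!was_gated);

        return ret;
}

int main(void)
{
        demo_set_default_dpm_table();
        printf("VCN gated again: %s\n", vcn_gated ? "yes" : "no");
        return 0;
}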
833 static int smu_apply_default_config_table_settings(struct smu_context *smu) in smu_apply_default_config_table_settings() argument
835 struct amdgpu_device *adev = smu->adev; in smu_apply_default_config_table_settings()
838 ret = smu_get_default_config_table_settings(smu, in smu_apply_default_config_table_settings()
843 return smu_set_config_table(smu, &adev->pm.config_table); in smu_apply_default_config_table_settings()
849 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_init() local
852 smu_set_fine_grain_gfx_freq_parameters(smu); in smu_late_init()
854 if (!smu->pm_enabled) in smu_late_init()
857 ret = smu_post_init(smu); in smu_late_init()
859 dev_err(adev->dev, "Failed to post smu init!\n"); in smu_late_init()
871 smu_set_ac_dc(smu); in smu_late_init()
877 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) { in smu_late_init()
878 ret = smu_set_default_od_settings(smu); in smu_late_init()
885 ret = smu_populate_umd_state_clk(smu); in smu_late_init()
891 ret = smu_get_asic_power_limits(smu, in smu_late_init()
892 &smu->current_power_limit, in smu_late_init()
893 &smu->default_power_limit, in smu_late_init()
894 &smu->max_power_limit, in smu_late_init()
895 &smu->min_power_limit); in smu_late_init()
902 smu_get_unique_id(smu); in smu_late_init()
904 smu_get_fan_parameters(smu); in smu_late_init()
906 smu_handle_task(smu, in smu_late_init()
907 smu->smu_dpm.dpm_level, in smu_late_init()
910 ret = smu_apply_default_config_table_settings(smu); in smu_late_init()
916 smu_restore_dpm_user_profile(smu); in smu_late_init()
921 static int smu_init_fb_allocations(struct smu_context *smu) in smu_init_fb_allocations() argument
923 struct amdgpu_device *adev = smu->adev; in smu_init_fb_allocations()
924 struct smu_table_context *smu_table = &smu->smu_table; in smu_init_fb_allocations()
987 static int smu_fini_fb_allocations(struct smu_context *smu) in smu_fini_fb_allocations() argument
989 struct smu_table_context *smu_table = &smu->smu_table; in smu_fini_fb_allocations()
1008 * @smu: smu_context pointer
1015 static int smu_alloc_memory_pool(struct smu_context *smu) in smu_alloc_memory_pool() argument
1017 struct amdgpu_device *adev = smu->adev; in smu_alloc_memory_pool()
1018 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_memory_pool()
1020 uint64_t pool_size = smu->pool_size; in smu_alloc_memory_pool()
1052 static int smu_free_memory_pool(struct smu_context *smu) in smu_free_memory_pool() argument
1054 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_memory_pool()
1069 static int smu_alloc_dummy_read_table(struct smu_context *smu) in smu_alloc_dummy_read_table() argument
1071 struct smu_table_context *smu_table = &smu->smu_table; in smu_alloc_dummy_read_table()
1074 struct amdgpu_device *adev = smu->adev; in smu_alloc_dummy_read_table()
1093 static void smu_free_dummy_read_table(struct smu_context *smu) in smu_free_dummy_read_table() argument
1095 struct smu_table_context *smu_table = &smu->smu_table; in smu_free_dummy_read_table()
1107 static int smu_smc_table_sw_init(struct smu_context *smu) in smu_smc_table_sw_init() argument
1115 ret = smu_init_smc_tables(smu); in smu_smc_table_sw_init()
1117 dev_err(smu->adev->dev, "Failed to init smc tables!\n"); in smu_smc_table_sw_init()
1125 ret = smu_init_power(smu); in smu_smc_table_sw_init()
1127 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n"); in smu_smc_table_sw_init()
1134 ret = smu_init_fb_allocations(smu); in smu_smc_table_sw_init()
1138 ret = smu_alloc_memory_pool(smu); in smu_smc_table_sw_init()
1142 ret = smu_alloc_dummy_read_table(smu); in smu_smc_table_sw_init()
1146 ret = smu_i2c_init(smu); in smu_smc_table_sw_init()
1153 static int smu_smc_table_sw_fini(struct smu_context *smu) in smu_smc_table_sw_fini() argument
1157 smu_i2c_fini(smu); in smu_smc_table_sw_fini()
1159 smu_free_dummy_read_table(smu); in smu_smc_table_sw_fini()
1161 ret = smu_free_memory_pool(smu); in smu_smc_table_sw_fini()
1165 ret = smu_fini_fb_allocations(smu); in smu_smc_table_sw_fini()
1169 ret = smu_fini_power(smu); in smu_smc_table_sw_fini()
1171 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n"); in smu_smc_table_sw_fini()
1175 ret = smu_fini_smc_tables(smu); in smu_smc_table_sw_fini()
1177 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n"); in smu_smc_table_sw_fini()
1186 struct smu_context *smu = container_of(work, struct smu_context, in smu_throttling_logging_work_fn() local
1189 smu_log_thermal_throttling(smu); in smu_throttling_logging_work_fn()
1194 struct smu_context *smu = container_of(work, struct smu_context, in smu_interrupt_work_fn() local
1197 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) in smu_interrupt_work_fn()
1198 smu->ppt_funcs->interrupt_work(smu); in smu_interrupt_work_fn()
1203 struct smu_context *smu = in smu_swctf_delayed_work_handler() local
1206 &smu->thermal_range; in smu_swctf_delayed_work_handler()
1207 struct amdgpu_device *adev = smu->adev; in smu_swctf_delayed_work_handler()
1216 smu->ppt_funcs->read_sensor && in smu_swctf_delayed_work_handler()
1217 !smu->ppt_funcs->read_sensor(smu, in smu_swctf_delayed_work_handler()
1229 static void smu_init_xgmi_plpd_mode(struct smu_context *smu) in smu_init_xgmi_plpd_mode() argument
1231 struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm); in smu_init_xgmi_plpd_mode()
1235 policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD); in smu_init_xgmi_plpd_mode()
1236 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) { in smu_init_xgmi_plpd_mode()
1243 if (smu_feature_is_enabled(smu, in smu_init_xgmi_plpd_mode()
1255 static bool smu_is_workload_profile_available(struct smu_context *smu, in smu_is_workload_profile_available() argument
1260 return smu->workload_map && smu->workload_map[profile].valid_mapping; in smu_is_workload_profile_available()
1263 static void smu_init_power_profile(struct smu_context *smu) in smu_init_power_profile() argument
1265 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) { in smu_init_power_profile()
1266 if (smu->is_apu || in smu_init_power_profile()
1268 smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) in smu_init_power_profile()
1269 smu->power_profile_mode = in smu_init_power_profile()
1272 smu->power_profile_mode = in smu_init_power_profile()
1275 smu_power_profile_mode_get(smu, smu->power_profile_mode); in smu_init_power_profile()
1281 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_init() local
1284 smu->pool_size = adev->pm.smu_prv_buffer_size; in smu_sw_init()
1285 smu->smu_feature.feature_num = SMU_FEATURE_MAX; in smu_sw_init()
1286 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); in smu_sw_init()
1287 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); in smu_sw_init()
1289 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); in smu_sw_init()
1290 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn); in smu_sw_init()
1291 atomic64_set(&smu->throttle_int_counter, 0); in smu_sw_init()
1292 smu->watermarks_bitmap = 0; in smu_sw_init()
1295 atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); in smu_sw_init()
1296 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); in smu_sw_init()
1297 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); in smu_sw_init()
1298 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); in smu_sw_init()
1300 smu_init_power_profile(smu); in smu_sw_init()
1301 smu->display_config = &adev->pm.pm_display_cfg; in smu_sw_init()
1303 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1304 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO; in smu_sw_init()
1306 INIT_DELAYED_WORK(&smu->swctf_delayed_work, in smu_sw_init()
1309 ret = smu_smc_table_sw_init(smu); in smu_sw_init()
1316 ret = smu_get_vbios_bootup_values(smu); in smu_sw_init()
1322 ret = smu_init_pptable_microcode(smu); in smu_sw_init()
1328 ret = smu_register_irq_handler(smu); in smu_sw_init()
1335 if (!smu->ppt_funcs->get_fan_control_mode) in smu_sw_init()
1336 smu->adev->pm.no_fan = true; in smu_sw_init()
1344 struct smu_context *smu = adev->powerplay.pp_handle; in smu_sw_fini() local
1347 ret = smu_smc_table_sw_fini(smu); in smu_sw_fini()
1353 if (smu->custom_profile_params) { in smu_sw_fini()
1354 kfree(smu->custom_profile_params); in smu_sw_fini()
1355 smu->custom_profile_params = NULL; in smu_sw_fini()
1358 smu_fini_microcode(smu); in smu_sw_fini()
1363 static int smu_get_thermal_temperature_range(struct smu_context *smu) in smu_get_thermal_temperature_range() argument
1365 struct amdgpu_device *adev = smu->adev; in smu_get_thermal_temperature_range()
1367 &smu->thermal_range; in smu_get_thermal_temperature_range()
1370 if (!smu->ppt_funcs->get_thermal_temperature_range) in smu_get_thermal_temperature_range()
1373 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range); in smu_get_thermal_temperature_range()
1393 * @smu: smu_context pointer
1398 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu) in smu_wbrf_handle_exclusion_ranges() argument
1402 struct amdgpu_device *adev = smu->adev; in smu_wbrf_handle_exclusion_ranges()
1451 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands); in smu_wbrf_handle_exclusion_ranges()
1455 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands); in smu_wbrf_handle_exclusion_ranges()
1474 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier); in smu_wbrf_event_handler() local
1478 schedule_delayed_work(&smu->wbrf_delayed_work, in smu_wbrf_event_handler()
1497 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work); in smu_wbrf_delayed_work_handler() local
1499 smu_wbrf_handle_exclusion_ranges(smu); in smu_wbrf_delayed_work_handler()
1505 * @smu: smu_context pointer
1509 static void smu_wbrf_support_check(struct smu_context *smu) in smu_wbrf_support_check() argument
1511 struct amdgpu_device *adev = smu->adev; in smu_wbrf_support_check()
1513 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf && in smu_wbrf_support_check()
1516 if (smu->wbrf_supported) in smu_wbrf_support_check()
1523 * @smu: smu_context pointer
1529 static int smu_wbrf_init(struct smu_context *smu) in smu_wbrf_init() argument
1533 if (!smu->wbrf_supported) in smu_wbrf_init()
1536 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler); in smu_wbrf_init()
1538 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler; in smu_wbrf_init()
1539 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier); in smu_wbrf_init()
1548 schedule_delayed_work(&smu->wbrf_delayed_work, in smu_wbrf_init()
1557 * @smu: smu_context pointer
1561 static void smu_wbrf_fini(struct smu_context *smu) in smu_wbrf_fini() argument
1563 if (!smu->wbrf_supported) in smu_wbrf_fini()
1566 amd_wbrf_unregister_notifier(&smu->wbrf_notifier); in smu_wbrf_fini()
1568 cancel_delayed_work_sync(&smu->wbrf_delayed_work); in smu_wbrf_fini()
1571 static int smu_smc_hw_setup(struct smu_context *smu) in smu_smc_hw_setup() argument
1573 struct smu_feature *feature = &smu->smu_feature; in smu_smc_hw_setup()
1574 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_setup()
1584 if (adev->in_suspend && smu_is_dpm_running(smu)) { in smu_smc_hw_setup()
1586 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1596 ret = smu_init_display_count(smu, 0); in smu_smc_hw_setup()
1602 ret = smu_set_driver_table_location(smu); in smu_smc_hw_setup()
1611 ret = smu_set_tool_table_location(smu); in smu_smc_hw_setup()
1621 ret = smu_notify_memory_pool_location(smu); in smu_smc_hw_setup()
1633 ret = smu_setup_pptable(smu); in smu_smc_hw_setup()
1640 /* smu_dump_pptable(smu); */ in smu_smc_hw_setup()
1644 * (to SMU). Driver involvement is neither needed nor permitted. in smu_smc_hw_setup()
1648 * Copy pptable bo in the vram to smc with SMU MSGs such as in smu_smc_hw_setup()
1651 ret = smu_write_pptable(smu); in smu_smc_hw_setup()
1659 ret = smu_run_btc(smu); in smu_smc_hw_setup()
1664 if (smu->wbrf_supported) { in smu_smc_hw_setup()
1665 ret = smu_enable_uclk_shadow(smu, true); in smu_smc_hw_setup()
1677 ret = smu_feature_set_allowed_mask(smu); in smu_smc_hw_setup()
1684 ret = smu_system_features_control(smu, true); in smu_smc_hw_setup()
1690 smu_init_xgmi_plpd_mode(smu); in smu_smc_hw_setup()
1692 ret = smu_feature_get_enabled_mask(smu, &features_supported); in smu_smc_hw_setup()
1701 if (!smu_is_dpm_running(smu)) in smu_smc_hw_setup()
1709 ret = smu_set_default_dpm_table(smu); in smu_smc_hw_setup()
1744 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); in smu_smc_hw_setup()
1750 ret = smu_get_thermal_temperature_range(smu); in smu_smc_hw_setup()
1756 ret = smu_enable_thermal_alert(smu); in smu_smc_hw_setup()
1762 ret = smu_notify_display_change(smu); in smu_smc_hw_setup()
1772 ret = smu_set_min_dcef_deep_sleep(smu, in smu_smc_hw_setup()
1773 smu->smu_table.boot_values.dcefclk / 100); in smu_smc_hw_setup()
1780 ret = smu_wbrf_init(smu); in smu_smc_hw_setup()
1787 static int smu_start_smc_engine(struct smu_context *smu) in smu_start_smc_engine() argument
1789 struct amdgpu_device *adev = smu->adev; in smu_start_smc_engine()
1792 smu->smc_fw_state = SMU_FW_INIT; in smu_start_smc_engine()
1796 if (smu->ppt_funcs->load_microcode) { in smu_start_smc_engine()
1797 ret = smu->ppt_funcs->load_microcode(smu); in smu_start_smc_engine()
1804 if (smu->ppt_funcs->check_fw_status) { in smu_start_smc_engine()
1805 ret = smu->ppt_funcs->check_fw_status(smu); in smu_start_smc_engine()
1816 ret = smu_check_fw_version(smu); in smu_start_smc_engine()
1827 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_init() local
1830 smu->pm_enabled = false; in smu_hw_init()
1834 ret = smu_start_smc_engine(smu); in smu_hw_init()
1842 * before SMU setup starts since part of SMU configuration in smu_hw_init()
1845 smu_wbrf_support_check(smu); in smu_hw_init()
1847 if (smu->is_apu) { in smu_hw_init()
1848 ret = smu_set_gfx_imu_enable(smu); in smu_hw_init()
1852 smu_dpm_set_vcn_enable(smu, true, i); in smu_hw_init()
1853 smu_dpm_set_jpeg_enable(smu, true); in smu_hw_init()
1854 smu_dpm_set_vpe_enable(smu, true); in smu_hw_init()
1855 smu_dpm_set_umsch_mm_enable(smu, true); in smu_hw_init()
1856 smu_set_mall_enable(smu); in smu_hw_init()
1857 smu_set_gfx_cgpg(smu, true); in smu_hw_init()
1860 if (!smu->pm_enabled) in smu_hw_init()
1863 ret = smu_get_driver_allowed_feature_mask(smu); in smu_hw_init()
1867 ret = smu_smc_hw_setup(smu); in smu_hw_init()
1876 * 2. DAL settings come between .hw_init and .late_init of SMU. in smu_hw_init()
1880 ret = smu_init_max_sustainable_clocks(smu); in smu_hw_init()
1888 dev_info(adev->dev, "SMU is initialized successfully!\n"); in smu_hw_init()
1893 static int smu_disable_dpms(struct smu_context *smu) in smu_disable_dpms() argument
1895 struct amdgpu_device *adev = smu->adev; in smu_disable_dpms()
1897 bool use_baco = !smu->is_apu && in smu_disable_dpms()
1903 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) in smu_disable_dpms()
1925 * - SMU firmware can handle the DPM reenablement in smu_disable_dpms()
1928 if (smu->uploading_custom_pp_table) { in smu_disable_dpms()
1966 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix)) in smu_disable_dpms()
1973 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { in smu_disable_dpms()
1974 ret = smu_disable_all_features_with_exception(smu, in smu_disable_dpms()
1977 dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); in smu_disable_dpms()
1981 ret = smu_system_features_control(smu, false); in smu_disable_dpms()
1983 dev_err(adev->dev, "Failed to disable smu features.\n"); in smu_disable_dpms()
1987 /* Notify SMU RLC is going to be off, stop RLC and SMU interaction. in smu_disable_dpms()
1988 * otherwise SMU will hang while interacting with RLC if RLC is halted in smu_disable_dpms()
1989 * this is a WA for Vangogh asic which fix the SMU hang issue. in smu_disable_dpms()
1991 ret = smu_notify_rlc_state(smu, false); in smu_disable_dpms()
2005 static int smu_smc_hw_cleanup(struct smu_context *smu) in smu_smc_hw_cleanup() argument
2007 struct amdgpu_device *adev = smu->adev; in smu_smc_hw_cleanup()
2010 smu_wbrf_fini(smu); in smu_smc_hw_cleanup()
2012 cancel_work_sync(&smu->throttling_logging_work); in smu_smc_hw_cleanup()
2013 cancel_work_sync(&smu->interrupt_work); in smu_smc_hw_cleanup()
2015 ret = smu_disable_thermal_alert(smu); in smu_smc_hw_cleanup()
2021 cancel_delayed_work_sync(&smu->swctf_delayed_work); in smu_smc_hw_cleanup()
2023 ret = smu_disable_dpms(smu); in smu_smc_hw_cleanup()
2032 static int smu_reset_mp1_state(struct smu_context *smu) in smu_reset_mp1_state() argument
2034 struct amdgpu_device *adev = smu->adev; in smu_reset_mp1_state()
2041 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); in smu_reset_mp1_state()
2049 struct smu_context *smu = adev->powerplay.pp_handle; in smu_hw_fini() local
2056 smu_dpm_set_vcn_enable(smu, false, i); in smu_hw_fini()
2057 smu_dpm_set_jpeg_enable(smu, false); in smu_hw_fini()
2058 smu_dpm_set_vpe_enable(smu, false); in smu_hw_fini()
2059 smu_dpm_set_umsch_mm_enable(smu, false); in smu_hw_fini()
2064 if (!smu->pm_enabled) in smu_hw_fini()
2069 ret = smu_smc_hw_cleanup(smu); in smu_hw_fini()
2073 ret = smu_reset_mp1_state(smu); in smu_hw_fini()
2083 struct smu_context *smu = adev->powerplay.pp_handle; in smu_late_fini() local
2085 kfree(smu); in smu_late_fini()
2088 static int smu_reset(struct smu_context *smu) in smu_reset() argument
2090 struct amdgpu_device *adev = smu->adev; in smu_reset()
2116 struct smu_context *smu = adev->powerplay.pp_handle; in smu_suspend() local
2123 if (!smu->pm_enabled) in smu_suspend()
2128 ret = smu_smc_hw_cleanup(smu); in smu_suspend()
2132 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); in smu_suspend()
2134 smu_set_gfx_cgpg(smu, false); in smu_suspend()
2140 ret = smu_get_entrycount_gfxoff(smu, &count); in smu_suspend()
2145 smu->workload_mask = 0; in smu_suspend()
2154 struct smu_context *smu = adev->powerplay.pp_handle; in smu_resume() local
2159 if (!smu->pm_enabled) in smu_resume()
2162 dev_info(adev->dev, "SMU is resuming...\n"); in smu_resume()
2164 ret = smu_start_smc_engine(smu); in smu_resume()
2170 ret = smu_smc_hw_setup(smu); in smu_resume()
2176 ret = smu_set_gfx_imu_enable(smu); in smu_resume()
2180 smu_set_gfx_cgpg(smu, true); in smu_resume()
2182 smu->disable_uclk_switch = 0; in smu_resume()
2186 dev_info(adev->dev, "SMU is resumed successfully!\n"); in smu_resume()
2194 struct smu_context *smu = handle; in smu_display_configuration_change() local
2196 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_configuration_change()
2202 smu_set_min_dcef_deep_sleep(smu, in smu_display_configuration_change()
2228 struct smu_context *smu = (struct smu_context*)(handle); in smu_enable_umd_pstate() local
2229 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_enable_umd_pstate()
2231 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_enable_umd_pstate()
2238 smu_gpo_control(smu, false); in smu_enable_umd_pstate()
2239 smu_gfx_ulv_control(smu, false); in smu_enable_umd_pstate()
2240 smu_deep_sleep_control(smu, false); in smu_enable_umd_pstate()
2241 amdgpu_asic_update_umd_stable_pstate(smu->adev, true); in smu_enable_umd_pstate()
2248 amdgpu_asic_update_umd_stable_pstate(smu->adev, false); in smu_enable_umd_pstate()
2249 smu_deep_sleep_control(smu, true); in smu_enable_umd_pstate()
2250 smu_gfx_ulv_control(smu, true); in smu_enable_umd_pstate()
2251 smu_gpo_control(smu, true); in smu_enable_umd_pstate()
2258 static int smu_bump_power_profile_mode(struct smu_context *smu, in smu_bump_power_profile_mode() argument
2266 if (smu->workload_refcount[i]) in smu_bump_power_profile_mode()
2270 if (smu->workload_mask == workload_mask) in smu_bump_power_profile_mode()
2273 if (smu->ppt_funcs->set_power_profile_mode) in smu_bump_power_profile_mode()
2274 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask, in smu_bump_power_profile_mode()
2279 smu->workload_mask = workload_mask; in smu_bump_power_profile_mode()
2284 static void smu_power_profile_mode_get(struct smu_context *smu, in smu_power_profile_mode_get() argument
2287 smu->workload_refcount[profile_mode]++; in smu_power_profile_mode_get()
2290 static void smu_power_profile_mode_put(struct smu_context *smu, in smu_power_profile_mode_put() argument
2293 if (smu->workload_refcount[profile_mode]) in smu_power_profile_mode_put()
2294 smu->workload_refcount[profile_mode]--; in smu_power_profile_mode_put()
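smu_bump_power_profile_mode together with the _get/_put helpers above keeps a per-profile reference count, folds every still-referenced profile into one workload mask, and only messages the firmware when that mask actually changes. A user-space sketch of the refcount-to-mask idea; the profile names and the printf standing in for the firmware message are placeholders.

#include <stdio.h>

#define DEMO_PROFILE_COUNT 4

enum { DEMO_BOOTUP_DEFAULT, DEMO_FULLSCREEN3D, DEMO_VIDEO, DEMO_COMPUTE };

static unsigned int refcount[DEMO_PROFILE_COUNT];
static unsigned int committed_mask;     /* last mask sent to the (pretend) firmware */

static void demo_profile_get(int profile)
{
        refcount[profile]++;
}

static void demo_profile_put(int profile)
{
        if (refcount[profile])
                refcount[profile]--;
}

/* Fold every profile that still has a user into one bitmask and only talk to
 * firmware when the mask changed. */
static int demo_bump_profile_mode(void)
{
        unsigned int mask = 0;
        int i;

        for (i = 0; i < DEMO_PROFILE_COUNT; i++)
                if (refcount[i])
                        mask |= 1u << i;

        if (mask == committed_mask)
                return 0;       /* nothing to do */

        printf("sending workload mask 0x%x to firmware\n", mask);
        committed_mask = mask;
        return 0;
}

int main(void)
{
        demo_profile_get(DEMO_FULLSCREEN3D);    /* e.g. the default profile at init */
        demo_bump_profile_mode();

        demo_profile_get(DEMO_COMPUTE);         /* a compute client arrives */
        demo_bump_profile_mode();

        demo_profile_put(DEMO_COMPUTE);         /* ...and leaves again */
        demo_bump_profile_mode();
        return 0;
}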
2297 static int smu_adjust_power_state_dynamic(struct smu_context *smu, in smu_adjust_power_state_dynamic() argument
2302 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_adjust_power_state_dynamic()
2305 ret = smu_display_config_changed(smu); in smu_adjust_power_state_dynamic()
2307 dev_err(smu->adev->dev, "Failed to change display config!"); in smu_adjust_power_state_dynamic()
2312 ret = smu_apply_clocks_adjust_rules(smu); in smu_adjust_power_state_dynamic()
2314 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); in smu_adjust_power_state_dynamic()
2319 ret = smu_notify_smc_display_config(smu); in smu_adjust_power_state_dynamic()
2321 dev_err(smu->adev->dev, "Failed to notify smc display config!"); in smu_adjust_power_state_dynamic()
2327 ret = smu_asic_set_performance_level(smu, level); in smu_adjust_power_state_dynamic()
2329 dev_err(smu->adev->dev, "Failed to set performance level!"); in smu_adjust_power_state_dynamic()
2339 smu_bump_power_profile_mode(smu, NULL, 0); in smu_adjust_power_state_dynamic()
2344 static int smu_handle_task(struct smu_context *smu, in smu_handle_task() argument
2350 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_handle_task()
2355 ret = smu_pre_display_config_changed(smu); in smu_handle_task()
2358 ret = smu_adjust_power_state_dynamic(smu, level, false); in smu_handle_task()
2361 ret = smu_adjust_power_state_dynamic(smu, level, true); in smu_handle_task()
2364 ret = smu_adjust_power_state_dynamic(smu, level, true); in smu_handle_task()
2377 struct smu_context *smu = handle; in smu_handle_dpm_task() local
2378 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; in smu_handle_dpm_task()
2380 return smu_handle_task(smu, smu_dpm->dpm_level, task_id); in smu_handle_dpm_task()
2388 struct smu_context *smu = handle; in smu_switch_power_profile() local
2389 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_switch_power_profile()
2392 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_switch_power_profile()
2401 smu_power_profile_mode_get(smu, type); in smu_switch_power_profile()
2403 smu_power_profile_mode_put(smu, type); in smu_switch_power_profile()
2404 ret = smu_bump_power_profile_mode(smu, NULL, 0); in smu_switch_power_profile()
2407 smu_power_profile_mode_put(smu, type); in smu_switch_power_profile()
2409 smu_power_profile_mode_get(smu, type); in smu_switch_power_profile()
2419 struct smu_context *smu = handle; in smu_get_performance_level() local
2420 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_get_performance_level()
2422 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_performance_level()
2425 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_get_performance_level()
2434 struct smu_context *smu = handle; in smu_force_performance_level() local
2435 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_performance_level()
2438 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_performance_level()
2441 if (!smu->is_apu && !smu_dpm_ctx->dpm_context) in smu_force_performance_level()
2444 ret = smu_enable_umd_pstate(smu, &level); in smu_force_performance_level()
2448 ret = smu_handle_task(smu, level, in smu_force_performance_level()
2453 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); in smu_force_performance_level()
2454 smu->user_dpm_profile.clk_dependency = 0; in smu_force_performance_level()
2462 struct smu_context *smu = handle; in smu_set_display_count() local
2464 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_display_count()
2467 return smu_init_display_count(smu, count); in smu_set_display_count()
2470 static int smu_force_smuclk_levels(struct smu_context *smu, in smu_force_smuclk_levels() argument
2474 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); in smu_force_smuclk_levels()
2477 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_force_smuclk_levels()
2481 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); in smu_force_smuclk_levels()
2485 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { in smu_force_smuclk_levels()
2486 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); in smu_force_smuclk_levels()
2487 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_force_smuclk_levels()
2488 smu->user_dpm_profile.clk_mask[clk_type] = mask; in smu_force_smuclk_levels()
2489 smu_set_user_clk_dependencies(smu, clk_type); in smu_force_smuclk_levels()
2500 struct smu_context *smu = handle; in smu_force_ppclk_levels() local
2536 return smu_force_smuclk_levels(smu, clk_type, mask); in smu_force_ppclk_levels()
2541 * flag will be cleared. So that those SMU services which
2549 struct smu_context *smu = handle; in smu_set_mp1_state() local
2552 if (!smu->pm_enabled) in smu_set_mp1_state()
2555 if (smu->ppt_funcs && in smu_set_mp1_state()
2556 smu->ppt_funcs->set_mp1_state) in smu_set_mp1_state()
2557 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); in smu_set_mp1_state()
2565 struct smu_context *smu = handle; in smu_set_df_cstate() local
2568 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_df_cstate()
2571 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) in smu_set_df_cstate()
2574 ret = smu->ppt_funcs->set_df_cstate(smu, state); in smu_set_df_cstate()
2576 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); in smu_set_df_cstate()
2581 int smu_write_watermarks_table(struct smu_context *smu) in smu_write_watermarks_table() argument
2583 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_write_watermarks_table()
2586 return smu_set_watermarks_table(smu, NULL); in smu_write_watermarks_table()
2592 struct smu_context *smu = handle; in smu_set_watermarks_for_clock_ranges() local
2594 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_watermarks_for_clock_ranges()
2597 if (smu->disable_watermark) in smu_set_watermarks_for_clock_ranges()
2600 return smu_set_watermarks_table(smu, clock_ranges); in smu_set_watermarks_for_clock_ranges()
2603 int smu_set_ac_dc(struct smu_context *smu) in smu_set_ac_dc() argument
2607 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_ac_dc()
2611 if (smu->dc_controlled_by_gpio) in smu_set_ac_dc()
2614 ret = smu_set_power_source(smu, in smu_set_ac_dc()
2615 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : in smu_set_ac_dc()
2618 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", in smu_set_ac_dc()
2619 smu->adev->pm.ac_power ? "AC" : "DC"); in smu_set_ac_dc()
2625 .name = "smu",
2677 struct smu_context *smu = handle; in smu_load_microcode() local
2678 struct amdgpu_device *adev = smu->adev; in smu_load_microcode()
2681 if (!smu->pm_enabled) in smu_load_microcode()
2688 if (smu->ppt_funcs->load_microcode) { in smu_load_microcode()
2689 ret = smu->ppt_funcs->load_microcode(smu); in smu_load_microcode()
2696 if (smu->ppt_funcs->check_fw_status) { in smu_load_microcode()
2697 ret = smu->ppt_funcs->check_fw_status(smu); in smu_load_microcode()
2707 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) in smu_set_gfx_cgpg() argument
2711 if (smu->ppt_funcs->set_gfx_cgpg) in smu_set_gfx_cgpg()
2712 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); in smu_set_gfx_cgpg()
2719 struct smu_context *smu = handle; in smu_set_fan_speed_rpm() local
2722 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_rpm()
2725 if (!smu->ppt_funcs->set_fan_speed_rpm) in smu_set_fan_speed_rpm()
2731 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); in smu_set_fan_speed_rpm()
2732 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_rpm()
2733 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_rpm()
2734 smu->user_dpm_profile.fan_speed_rpm = speed; in smu_set_fan_speed_rpm()
2737 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_rpm()
2738 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_speed_rpm()
2745 * smu_get_power_limit - Request one of the SMU Power Limits
2747 * @handle: pointer to smu context
2759 struct smu_context *smu = handle; in smu_get_power_limit() local
2760 struct amdgpu_device *adev = smu->adev; in smu_get_power_limit()
2765 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_power_limit()
2797 if (smu->ppt_funcs->get_ppt_limit) in smu_get_power_limit()
2798 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); in smu_get_power_limit()
2810 ret = smu_get_asic_power_limits(smu, in smu_get_power_limit()
2811 &smu->current_power_limit, in smu_get_power_limit()
2817 *limit = smu->current_power_limit; in smu_get_power_limit()
2820 *limit = smu->default_power_limit; in smu_get_power_limit()
2823 *limit = smu->max_power_limit; in smu_get_power_limit()
2826 *limit = smu->min_power_limit; in smu_get_power_limit()
2838 struct smu_context *smu = handle; in smu_set_power_limit() local
2842 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_power_limit()
2847 if (smu->ppt_funcs->set_power_limit) in smu_set_power_limit()
2848 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2850 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) { in smu_set_power_limit()
2851 dev_err(smu->adev->dev, in smu_set_power_limit()
2853 limit, smu->min_power_limit, smu->max_power_limit); in smu_set_power_limit()
2858 limit = smu->current_power_limit; in smu_set_power_limit()
2860 if (smu->ppt_funcs->set_power_limit) { in smu_set_power_limit()
2861 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); in smu_set_power_limit()
2862 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) in smu_set_power_limit()
2863 smu->user_dpm_profile.power_limit = limit; in smu_set_power_limit()
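smu_set_power_limit above rejects requests outside the ASIC's [min, max] window, treats a request of 0 as "reapply the current limit", and caches the accepted value so the user-profile restore can replay it later. A rough user-space sketch under those assumptions; demo_limits and the numbers in main() are invented.

#include <errno.h>
#include <stdio.h>

struct demo_limits {
        unsigned int current_limit;
        unsigned int min_limit;
        unsigned int max_limit;
        unsigned int user_cached;       /* replayed after reset/resume */
};

static int demo_set_power_limit(struct demo_limits *l, unsigned int limit)
{
        if (limit > l->max_limit || limit < l->min_limit) {
                fprintf(stderr, "limit %u out of range [%u, %u]\n",
                        limit, l->min_limit, l->max_limit);
                return -EINVAL;
        }

        if (!limit)
                limit = l->current_limit;       /* 0 means "keep what we have" */

        l->current_limit = limit;       /* stand-in for the firmware message */
        l->user_cached = limit;         /* skipped while a profile restore is running */
        return 0;
}

int main(void)
{
        struct demo_limits l = { .current_limit = 200, .min_limit = 0, .max_limit = 250 };
        int ret;

        ret = demo_set_power_limit(&l, 300);    /* rejected: above max */
        printf("set 300 -> %d (limit %u)\n", ret, l.current_limit);

        ret = demo_set_power_limit(&l, 220);    /* accepted */
        printf("set 220 -> %d (limit %u)\n", ret, l.current_limit);

        ret = demo_set_power_limit(&l, 0);      /* keep the current limit */
        printf("set 0   -> %d (limit %u)\n", ret, l.current_limit);
        return 0;
}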
2869 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) in smu_print_smuclk_levels() argument
2873 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_print_smuclk_levels()
2876 if (smu->ppt_funcs->print_clk_levels) in smu_print_smuclk_levels()
2877 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); in smu_print_smuclk_levels()
2944 struct smu_context *smu = handle; in smu_print_ppclk_levels() local
2951 return smu_print_smuclk_levels(smu, clk_type, buf); in smu_print_ppclk_levels()
2956 struct smu_context *smu = handle; in smu_emit_ppclk_levels() local
2963 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_emit_ppclk_levels()
2966 if (!smu->ppt_funcs->emit_clk_levels) in smu_emit_ppclk_levels()
2969 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); in smu_emit_ppclk_levels()
2977 struct smu_context *smu = handle; in smu_od_edit_dpm_table() local
2980 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_od_edit_dpm_table()
2983 if (smu->ppt_funcs->od_edit_dpm_table) { in smu_od_edit_dpm_table()
2984 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); in smu_od_edit_dpm_table()
2995 struct smu_context *smu = handle; in smu_read_sensor() local
2996 struct amdgpu_device *adev = smu->adev; in smu_read_sensor()
2998 &smu->pstate_table; in smu_read_sensor()
3002 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_read_sensor()
3011 if (smu->ppt_funcs->read_sensor) in smu_read_sensor()
3012 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) in smu_read_sensor()
3033 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); in smu_read_sensor()
3037 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; in smu_read_sensor()
3041 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; in smu_read_sensor()
3047 if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) { in smu_read_sensor()
3074 struct smu_context *smu = handle; in smu_get_apu_thermal_limit() local
3076 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) in smu_get_apu_thermal_limit()
3077 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); in smu_get_apu_thermal_limit()
3085 struct smu_context *smu = handle; in smu_set_apu_thermal_limit() local
3087 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) in smu_set_apu_thermal_limit()
3088 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); in smu_set_apu_thermal_limit()
3095 struct smu_context *smu = handle; in smu_get_power_profile_mode() local
3097 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_get_power_profile_mode()
3098 !smu->ppt_funcs->get_power_profile_mode) in smu_get_power_profile_mode()
3103 return smu->ppt_funcs->get_power_profile_mode(smu, buf); in smu_get_power_profile_mode()
3110 struct smu_context *smu = handle; in smu_set_power_profile_mode() local
3114 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || in smu_set_power_profile_mode()
3115 !smu->ppt_funcs->set_power_profile_mode) in smu_set_power_profile_mode()
3121 smu->workload_mask = 0; in smu_set_power_profile_mode()
3124 if ((param[param_size] != smu->power_profile_mode) || custom) { in smu_set_power_profile_mode()
3126 smu_power_profile_mode_put(smu, smu->power_profile_mode); in smu_set_power_profile_mode()
3128 smu_power_profile_mode_get(smu, param[param_size]); in smu_set_power_profile_mode()
3129 ret = smu_bump_power_profile_mode(smu, in smu_set_power_profile_mode()
3133 smu_power_profile_mode_put(smu, param[param_size]); in smu_set_power_profile_mode()
3136 smu->power_profile_mode = param[param_size]; in smu_set_power_profile_mode()
3144 struct smu_context *smu = handle; in smu_get_fan_control_mode() local
3146 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_control_mode()
3149 if (!smu->ppt_funcs->get_fan_control_mode) in smu_get_fan_control_mode()
3155 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); in smu_get_fan_control_mode()
3162 struct smu_context *smu = handle; in smu_set_fan_control_mode() local
3165 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_control_mode()
3168 if (!smu->ppt_funcs->set_fan_control_mode) in smu_set_fan_control_mode()
3174 ret = smu->ppt_funcs->set_fan_control_mode(smu, value); in smu_set_fan_control_mode()
3178 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_control_mode()
3179 smu->user_dpm_profile.fan_mode = value; in smu_set_fan_control_mode()
3183 smu->user_dpm_profile.fan_speed_pwm = 0; in smu_set_fan_control_mode()
3184 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_control_mode()
3185 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); in smu_set_fan_control_mode()
3195 struct smu_context *smu = handle; in smu_get_fan_speed_pwm() local
3198 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_pwm()
3201 if (!smu->ppt_funcs->get_fan_speed_pwm) in smu_get_fan_speed_pwm()
3207 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); in smu_get_fan_speed_pwm()
3214 struct smu_context *smu = handle; in smu_set_fan_speed_pwm() local
3217 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_fan_speed_pwm()
3220 if (!smu->ppt_funcs->set_fan_speed_pwm) in smu_set_fan_speed_pwm()
3226 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); in smu_set_fan_speed_pwm()
3227 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { in smu_set_fan_speed_pwm()
3228 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; in smu_set_fan_speed_pwm()
3229 smu->user_dpm_profile.fan_speed_pwm = speed; in smu_set_fan_speed_pwm()
3232 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; in smu_set_fan_speed_pwm()
3233 smu->user_dpm_profile.fan_speed_rpm = 0; in smu_set_fan_speed_pwm()
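The fan-speed setters above keep user_dpm_profile consistent by treating the cached PWM and RPM values as mutually exclusive: storing one clears the other's flag and value, so the restore path never replays two conflicting fan settings. A tiny sketch of that bookkeeping with invented DEMO_* flags.

#include <stdio.h>

#define DEMO_CUSTOM_FAN_PWM  (1u << 0)
#define DEMO_CUSTOM_FAN_RPM  (1u << 1)

struct demo_fan_profile {
        unsigned int flags;
        unsigned int speed_pwm;
        unsigned int speed_rpm;
};

/* Each setter records its own value and wipes the other one, so at most one
 * custom fan setting is ever cached. */
static void demo_set_fan_pwm(struct demo_fan_profile *p, unsigned int pwm)
{
        p->flags |= DEMO_CUSTOM_FAN_PWM;
        p->speed_pwm = pwm;

        p->flags &= ~DEMO_CUSTOM_FAN_RPM;
        p->speed_rpm = 0;
}

static void demo_set_fan_rpm(struct demo_fan_profile *p, unsigned int rpm)
{
        p->flags |= DEMO_CUSTOM_FAN_RPM;
        p->speed_rpm = rpm;

        p->flags &= ~DEMO_CUSTOM_FAN_PWM;
        p->speed_pwm = 0;
}

int main(void)
{
        struct demo_fan_profile p = { 0 };

        demo_set_fan_pwm(&p, 128);
        demo_set_fan_rpm(&p, 1800);     /* overrides the PWM setting */
        printf("flags=0x%x pwm=%u rpm=%u\n", p.flags, p.speed_pwm, p.speed_rpm);
        return 0;
}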
3241 struct smu_context *smu = handle; in smu_get_fan_speed_rpm() local
3244 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_fan_speed_rpm()
3247 if (!smu->ppt_funcs->get_fan_speed_rpm) in smu_get_fan_speed_rpm()
3253 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); in smu_get_fan_speed_rpm()
3260 struct smu_context *smu = handle; in smu_set_deep_sleep_dcefclk() local
3262 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_deep_sleep_dcefclk()
3265 return smu_set_min_dcef_deep_sleep(smu, clk); in smu_set_deep_sleep_dcefclk()
3272 struct smu_context *smu = handle; in smu_get_clock_by_type_with_latency() local
3276 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_clock_by_type_with_latency()
3279 if (smu->ppt_funcs->get_clock_by_type_with_latency) { in smu_get_clock_by_type_with_latency()
3294 dev_err(smu->adev->dev, "Invalid clock type!\n"); in smu_get_clock_by_type_with_latency()
3298 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); in smu_get_clock_by_type_with_latency()
3307 struct smu_context *smu = handle; in smu_display_clock_voltage_request() local
3310 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_clock_voltage_request()
3313 if (smu->ppt_funcs->display_clock_voltage_request) in smu_display_clock_voltage_request()
3314 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); in smu_display_clock_voltage_request()
3323 struct smu_context *smu = handle; in smu_display_disable_memory_clock_switch() local
3326 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_display_disable_memory_clock_switch()
3329 if (smu->ppt_funcs->display_disable_memory_clock_switch) in smu_display_disable_memory_clock_switch()
3330 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); in smu_display_disable_memory_clock_switch()
3338 struct smu_context *smu = handle; in smu_set_xgmi_pstate() local
3341 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_set_xgmi_pstate()
3344 if (smu->ppt_funcs->set_xgmi_pstate) in smu_set_xgmi_pstate()
3345 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); in smu_set_xgmi_pstate()
3348 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); in smu_set_xgmi_pstate()
3355 struct smu_context *smu = handle; in smu_get_baco_capability() local
3357 if (!smu->pm_enabled) in smu_get_baco_capability()
3360 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support) in smu_get_baco_capability()
3363 return smu->ppt_funcs->get_bamaco_support(smu); in smu_get_baco_capability()
3368 struct smu_context *smu = handle; in smu_baco_set_state() local
3371 if (!smu->pm_enabled) in smu_baco_set_state()
3375 if (smu->ppt_funcs->baco_exit) in smu_baco_set_state()
3376 ret = smu->ppt_funcs->baco_exit(smu); in smu_baco_set_state()
3378 if (smu->ppt_funcs->baco_enter) in smu_baco_set_state()
3379 ret = smu->ppt_funcs->baco_enter(smu); in smu_baco_set_state()
3385 dev_err(smu->adev->dev, "Failed to %s BACO state!\n", in smu_baco_set_state()
3391 bool smu_mode1_reset_is_support(struct smu_context *smu) in smu_mode1_reset_is_support() argument
3395 if (!smu->pm_enabled) in smu_mode1_reset_is_support()
3398 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) in smu_mode1_reset_is_support()
3399 ret = smu->ppt_funcs->mode1_reset_is_support(smu); in smu_mode1_reset_is_support()
3404 bool smu_mode2_reset_is_support(struct smu_context *smu) in smu_mode2_reset_is_support() argument
3408 if (!smu->pm_enabled) in smu_mode2_reset_is_support()
3411 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) in smu_mode2_reset_is_support()
3412 ret = smu->ppt_funcs->mode2_reset_is_support(smu); in smu_mode2_reset_is_support()
3417 int smu_mode1_reset(struct smu_context *smu) in smu_mode1_reset() argument
3421 if (!smu->pm_enabled) in smu_mode1_reset()
3424 if (smu->ppt_funcs->mode1_reset) in smu_mode1_reset()
3425 ret = smu->ppt_funcs->mode1_reset(smu); in smu_mode1_reset()
3432 struct smu_context *smu = handle; in smu_mode2_reset() local
3435 if (!smu->pm_enabled) in smu_mode2_reset()
3438 if (smu->ppt_funcs->mode2_reset) in smu_mode2_reset()
3439 ret = smu->ppt_funcs->mode2_reset(smu); in smu_mode2_reset()
3442 dev_err(smu->adev->dev, "Mode2 reset failed!\n"); in smu_mode2_reset()
3449 struct smu_context *smu = handle; in smu_enable_gfx_features() local
3452 if (!smu->pm_enabled) in smu_enable_gfx_features()
3455 if (smu->ppt_funcs->enable_gfx_features) in smu_enable_gfx_features()
3456 ret = smu->ppt_funcs->enable_gfx_features(smu); in smu_enable_gfx_features()
3459 dev_err(smu->adev->dev, "enable gfx features failed!\n"); in smu_enable_gfx_features()
3467 struct smu_context *smu = handle; in smu_get_max_sustainable_clocks_by_dc() local
3470 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_max_sustainable_clocks_by_dc()
3473 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) in smu_get_max_sustainable_clocks_by_dc()
3474 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); in smu_get_max_sustainable_clocks_by_dc()
3483 struct smu_context *smu = handle; in smu_get_uclk_dpm_states() local
3486 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_uclk_dpm_states()
3489 if (smu->ppt_funcs->get_uclk_dpm_states) in smu_get_uclk_dpm_states()
3490 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); in smu_get_uclk_dpm_states()
3497 struct smu_context *smu = handle; in smu_get_current_power_state() local
3500 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_current_power_state()
3503 if (smu->ppt_funcs->get_current_power_state) in smu_get_current_power_state()
3504 pm_state = smu->ppt_funcs->get_current_power_state(smu); in smu_get_current_power_state()
3512 struct smu_context *smu = handle; in smu_get_dpm_clock_table() local
3515 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_get_dpm_clock_table()
3518 if (smu->ppt_funcs->get_dpm_clock_table) in smu_get_dpm_clock_table()
3519 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); in smu_get_dpm_clock_table()
3526 struct smu_context *smu = handle; in smu_sys_get_gpu_metrics() local
3528 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_gpu_metrics()
3531 if (!smu->ppt_funcs->get_gpu_metrics) in smu_sys_get_gpu_metrics()
3534 return smu->ppt_funcs->get_gpu_metrics(smu, table); in smu_sys_get_gpu_metrics()
3540 struct smu_context *smu = handle; in smu_sys_get_pm_metrics() local
3542 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_sys_get_pm_metrics()
3545 if (!smu->ppt_funcs->get_pm_metrics) in smu_sys_get_pm_metrics()
3548 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size); in smu_sys_get_pm_metrics()
3553 struct smu_context *smu = handle; in smu_enable_mgpu_fan_boost() local
3556 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) in smu_enable_mgpu_fan_boost()
3559 if (smu->ppt_funcs->enable_mgpu_fan_boost) in smu_enable_mgpu_fan_boost()
3560 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); in smu_enable_mgpu_fan_boost()
3568 struct smu_context *smu = handle; in smu_gfx_state_change_set() local
3571 if (smu->ppt_funcs->gfx_state_change_set) in smu_gfx_state_change_set()
3572 ret = smu->ppt_funcs->gfx_state_change_set(smu, state); in smu_gfx_state_change_set()
3577 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) in smu_handle_passthrough_sbr() argument
3581 if (smu->ppt_funcs->smu_handle_passthrough_sbr) in smu_handle_passthrough_sbr()
3582 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); in smu_handle_passthrough_sbr()
3587 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) in smu_get_ecc_info() argument
3591 if (smu->ppt_funcs && in smu_get_ecc_info()
3592 smu->ppt_funcs->get_ecc_info) in smu_get_ecc_info()
3593 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); in smu_get_ecc_info()
3601 struct smu_context *smu = handle; in smu_get_prv_buffer_details() local
3602 struct smu_table_context *smu_table = &smu->smu_table; in smu_get_prv_buffer_details()
3638 ssize_t smu_get_pm_policy_info(struct smu_context *smu, in smu_get_pm_policy_info() argument
3641 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; in smu_get_pm_policy_info()
3647 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || in smu_get_pm_policy_info()
3654 dpm_policy = smu_get_pm_policy(smu, p_type); in smu_get_pm_policy_info()
3666 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu, in smu_get_pm_policy() argument
3669 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; in smu_get_pm_policy()
3685 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type, in smu_set_pm_policy() argument
3688 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm; in smu_set_pm_policy()
3694 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt || in smu_set_pm_policy()
3701 dpm_policy = smu_get_pm_policy(smu, p_type); in smu_set_pm_policy()
3709 ret = dpm_policy->set_policy(smu, level); in smu_set_pm_policy()
3776 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, in smu_wait_for_event() argument
3781 if (smu->ppt_funcs->wait_for_event) in smu_wait_for_event()
3782 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); in smu_wait_for_event()
3787 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) in smu_stb_collect_info() argument
3790 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) in smu_stb_collect_info()
3794 if (size != smu->stb_context.stb_buf_size) in smu_stb_collect_info()
3798 * No need to lock smu mutex as we access STB directly through MMIO in smu_stb_collect_info()
3799 * and not going through SMU messaging route (for now at least). in smu_stb_collect_info()
3802 return smu->ppt_funcs->stb_collect_info(smu, buf, size); in smu_stb_collect_info()
3810 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_open() local
3814 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); in smu_stb_debugfs_open()
3818 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); in smu_stb_debugfs_open()
3835 struct smu_context *smu = adev->powerplay.pp_handle; in smu_stb_debugfs_read() local
3844 smu->stb_context.stb_buf_size); in smu_stb_debugfs_read()
3876 struct smu_context *smu = adev->powerplay.pp_handle; in amdgpu_smu_stb_debug_fs_init() local
3878 if (!smu || (!smu->stb_context.stb_buf_size)) in amdgpu_smu_stb_debug_fs_init()
3886 smu->stb_context.stb_buf_size); in amdgpu_smu_stb_debug_fs_init()
3890 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_pages_num() argument
3894 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) in smu_send_hbm_bad_pages_num()
3895 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); in smu_send_hbm_bad_pages_num()
3900 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size) in smu_send_hbm_bad_channel_flag() argument
3904 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag) in smu_send_hbm_bad_channel_flag()
3905 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size); in smu_send_hbm_bad_channel_flag()
3910 int smu_send_rma_reason(struct smu_context *smu) in smu_send_rma_reason() argument
3914 if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason) in smu_send_rma_reason()
3915 ret = smu->ppt_funcs->send_rma_reason(smu); in smu_send_rma_reason()
3920 int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) in smu_reset_sdma() argument
3924 if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma) in smu_reset_sdma()
3925 ret = smu->ppt_funcs->reset_sdma(smu, inst_mask); in smu_reset_sdma()