1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #define SWSMU_CODE_LAYER_L1
24
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_6_ppt.h"
46 #include "smu_v13_0_7_ppt.h"
47 #include "smu_v14_0_0_ppt.h"
48 #include "smu_v14_0_2_ppt.h"
49 #include "amd_pcie.h"
50
51 /*
52 * DO NOT use these for err/warn/info/debug messages.
53 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54 * They are more MGPU friendly.
55 */
56 #undef pr_err
57 #undef pr_warn
58 #undef pr_info
59 #undef pr_debug
60
61 static const struct amd_pm_funcs swsmu_pm_funcs;
62 static int smu_force_smuclk_levels(struct smu_context *smu,
63 enum smu_clk_type clk_type,
64 uint32_t mask);
65 static int smu_handle_task(struct smu_context *smu,
66 enum amd_dpm_forced_level level,
67 enum amd_pp_task task_id);
68 static int smu_reset(struct smu_context *smu);
69 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70 static int smu_set_fan_control_mode(void *handle, u32 value);
71 static int smu_set_power_limit(void *handle, uint32_t limit);
72 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75 static void smu_power_profile_mode_get(struct smu_context *smu,
76 enum PP_SMC_POWER_PROFILE profile_mode);
77 static void smu_power_profile_mode_put(struct smu_context *smu,
78 enum PP_SMC_POWER_PROFILE profile_mode);
79
80 static int smu_sys_get_pp_feature_mask(void *handle,
81 char *buf)
82 {
83 struct smu_context *smu = handle;
84
85 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
86 return -EOPNOTSUPP;
87
88 return smu_get_pp_feature_mask(smu, buf);
89 }
90
91 static int smu_sys_set_pp_feature_mask(void *handle,
92 uint64_t new_mask)
93 {
94 struct smu_context *smu = handle;
95
96 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
97 return -EOPNOTSUPP;
98
99 return smu_set_pp_feature_mask(smu, new_mask);
100 }
101
102 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
103 {
104 if (!smu->ppt_funcs->set_gfx_off_residency)
105 return -EINVAL;
106
107 return smu_set_gfx_off_residency(smu, value);
108 }
109
110 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
111 {
112 if (!smu->ppt_funcs->get_gfx_off_residency)
113 return -EINVAL;
114
115 return smu_get_gfx_off_residency(smu, value);
116 }
117
118 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
119 {
120 if (!smu->ppt_funcs->get_gfx_off_entrycount)
121 return -EINVAL;
122
123 return smu_get_gfx_off_entrycount(smu, value);
124 }
125
126 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
127 {
128 if (!smu->ppt_funcs->get_gfx_off_status)
129 return -EINVAL;
130
131 *value = smu_get_gfx_off_status(smu);
132
133 return 0;
134 }
135
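/*
 * Apply a soft min/max frequency limit for @clk_type via the ASIC-specific
 * set_soft_freq_limited_range callback; a no-op when the hook is absent.
 */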
136 int smu_set_soft_freq_range(struct smu_context *smu,
137 enum smu_clk_type clk_type,
138 uint32_t min,
139 uint32_t max)
140 {
141 int ret = 0;
142
143 if (smu->ppt_funcs->set_soft_freq_limited_range)
144 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
145 clk_type,
146 min,
147 max,
148 false);
149
150 return ret;
151 }
152
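/*
 * Query the ultimate (hard) min/max DPM frequency of @clk_type. Either
 * @min or @max may be NULL when the caller only needs one bound.
 */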
153 int smu_get_dpm_freq_range(struct smu_context *smu,
154 enum smu_clk_type clk_type,
155 uint32_t *min,
156 uint32_t *max)
157 {
158 int ret = -ENOTSUPP;
159
160 if (!min && !max)
161 return -EINVAL;
162
163 if (smu->ppt_funcs->get_dpm_ultimate_freq)
164 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
165 clk_type,
166 min,
167 max);
168
169 return ret;
170 }
171
172 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
173 {
174 int ret = 0;
175 struct amdgpu_device *adev = smu->adev;
176
177 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
178 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
179 if (ret)
180 dev_err(adev->dev, "Failed to enable gfx imu!\n");
181 }
182 return ret;
183 }
184
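/*
 * Legacy powerplay hook: report the minimum (low == true) or maximum
 * memory clock in 10 kHz units, or 0 on failure.
 */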
185 static u32 smu_get_mclk(void *handle, bool low)
186 {
187 struct smu_context *smu = handle;
188 uint32_t clk_freq;
189 int ret = 0;
190
191 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
192 low ? &clk_freq : NULL,
193 !low ? &clk_freq : NULL);
194 if (ret)
195 return 0;
196 return clk_freq * 100;
197 }
198
199 static u32 smu_get_sclk(void *handle, bool low)
200 {
201 struct smu_context *smu = handle;
202 uint32_t clk_freq;
203 int ret = 0;
204
205 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
206 low ? &clk_freq : NULL,
207 !low ? &clk_freq : NULL);
208 if (ret)
209 return 0;
210 return clk_freq * 100;
211 }
212
213 static int smu_set_gfx_imu_enable(struct smu_context *smu)
214 {
215 struct amdgpu_device *adev = smu->adev;
216
217 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
218 return 0;
219
220 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
221 return 0;
222
223 return smu_set_gfx_power_up_by_imu(smu);
224 }
225
226 static bool is_vcn_enabled(struct amdgpu_device *adev)
227 {
228 int i;
229
230 for (i = 0; i < adev->num_ip_blocks; i++) {
231 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
232 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
233 !adev->ip_blocks[i].status.valid)
234 return false;
235 }
236
237 return true;
238 }
239
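/*
 * Power up/down the VCN instances via DPM. The current gating state is
 * tracked in power_gate->vcn_gated, so redundant requests are skipped.
 */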
240 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
241 bool enable)
242 {
243 struct smu_power_context *smu_power = &smu->smu_power;
244 struct smu_power_gate *power_gate = &smu_power->power_gate;
245 int ret = 0;
246
247 /*
248 * don't power on vcn/jpeg when they are skipped.
249 */
250 if (!is_vcn_enabled(smu->adev))
251 return 0;
252
253 if (!smu->ppt_funcs->dpm_set_vcn_enable)
254 return 0;
255
256 if (atomic_read(&power_gate->vcn_gated) ^ enable)
257 return 0;
258
259 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, 0xff);
260 if (!ret)
261 atomic_set(&power_gate->vcn_gated, !enable);
262
263 return ret;
264 }
265
266 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
267 bool enable)
268 {
269 struct smu_power_context *smu_power = &smu->smu_power;
270 struct smu_power_gate *power_gate = &smu_power->power_gate;
271 int ret = 0;
272
273 if (!is_vcn_enabled(smu->adev))
274 return 0;
275
276 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
277 return 0;
278
279 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
280 return 0;
281
282 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
283 if (!ret)
284 atomic_set(&power_gate->jpeg_gated, !enable);
285
286 return ret;
287 }
288
289 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
290 bool enable)
291 {
292 struct smu_power_context *smu_power = &smu->smu_power;
293 struct smu_power_gate *power_gate = &smu_power->power_gate;
294 int ret = 0;
295
296 if (!smu->ppt_funcs->dpm_set_vpe_enable)
297 return 0;
298
299 if (atomic_read(&power_gate->vpe_gated) ^ enable)
300 return 0;
301
302 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
303 if (!ret)
304 atomic_set(&power_gate->vpe_gated, !enable);
305
306 return ret;
307 }
308
309 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
310 bool enable)
311 {
312 struct smu_power_context *smu_power = &smu->smu_power;
313 struct smu_power_gate *power_gate = &smu_power->power_gate;
314 int ret = 0;
315
316 if (!smu->adev->enable_umsch_mm)
317 return 0;
318
319 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
320 return 0;
321
322 if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
323 return 0;
324
325 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
326 if (!ret)
327 atomic_set(&power_gate->umsch_mm_gated, !enable);
328
329 return ret;
330 }
331
332 static int smu_set_mall_enable(struct smu_context *smu)
333 {
334 int ret = 0;
335
336 if (!smu->ppt_funcs->set_mall_enable)
337 return 0;
338
339 ret = smu->ppt_funcs->set_mall_enable(smu);
340
341 return ret;
342 }
343
344 /**
345 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
346 *
347 * @handle: smu_context pointer
348 * @block_type: the IP block to power gate/ungate
349 * @gate: to power gate if true, ungate otherwise
350 *
351 * This API uses no smu->mutex lock protection due to:
352 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
353 * where the caller guarantees the call is free of race conditions.
354 * 2. Or it gets called on a user request to change power_dpm_force_performance_level.
355 * In that case, the smu->mutex lock protection is already enforced by
356 * the parent API smu_force_performance_level in the call path.
357 */
358 static int smu_dpm_set_power_gate(void *handle,
359 uint32_t block_type,
360 bool gate)
361 {
362 struct smu_context *smu = handle;
363 int ret = 0;
364
365 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
366 dev_WARN(smu->adev->dev,
367 "SMU uninitialized but power %s requested for %u!\n",
368 gate ? "gate" : "ungate", block_type);
369 return -EOPNOTSUPP;
370 }
371
372 switch (block_type) {
373 /*
374 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
375 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
376 */
377 case AMD_IP_BLOCK_TYPE_UVD:
378 case AMD_IP_BLOCK_TYPE_VCN:
379 ret = smu_dpm_set_vcn_enable(smu, !gate);
380 if (ret)
381 dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
382 gate ? "gate" : "ungate");
383 break;
384 case AMD_IP_BLOCK_TYPE_GFX:
385 ret = smu_gfx_off_control(smu, gate);
386 if (ret)
387 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
388 gate ? "enable" : "disable");
389 break;
390 case AMD_IP_BLOCK_TYPE_SDMA:
391 ret = smu_powergate_sdma(smu, gate);
392 if (ret)
393 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
394 gate ? "gate" : "ungate");
395 break;
396 case AMD_IP_BLOCK_TYPE_JPEG:
397 ret = smu_dpm_set_jpeg_enable(smu, !gate);
398 if (ret)
399 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
400 gate ? "gate" : "ungate");
401 break;
402 case AMD_IP_BLOCK_TYPE_VPE:
403 ret = smu_dpm_set_vpe_enable(smu, !gate);
404 if (ret)
405 dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
406 gate ? "gate" : "ungate");
407 break;
408 default:
409 dev_err(smu->adev->dev, "Unsupported block type!\n");
410 return -EINVAL;
411 }
412
413 return ret;
414 }
415
416 /**
417 * smu_set_user_clk_dependencies - set user profile clock dependencies
418 *
419 * @smu: smu_context pointer
420 * @clk: enum smu_clk_type type
421 *
422 * Enable/Disable the clock dependency for the @clk type.
423 */
424 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
425 {
426 if (smu->adev->in_suspend)
427 return;
428
429 if (clk == SMU_MCLK) {
430 smu->user_dpm_profile.clk_dependency = 0;
431 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
432 } else if (clk == SMU_FCLK) {
433 /* MCLK takes precedence over FCLK */
434 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
435 return;
436
437 smu->user_dpm_profile.clk_dependency = 0;
438 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
439 } else if (clk == SMU_SOCCLK) {
440 /* MCLK takes precedence over SOCCLK */
441 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
442 return;
443
444 smu->user_dpm_profile.clk_dependency = 0;
445 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
446 } else
447 /* Add clk dependencies here, if any */
448 return;
449 }
450
451 /**
452 * smu_restore_dpm_user_profile - reinstate user dpm profile
453 *
454 * @smu: smu_context pointer
455 *
456 * Restore the saved user power configurations, including power limit,
457 * clock frequencies, fan control mode and fan speed.
458 */
459 static void smu_restore_dpm_user_profile(struct smu_context *smu)
460 {
461 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
462 int ret = 0;
463
464 if (!smu->adev->in_suspend)
465 return;
466
467 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
468 return;
469
470 /* Enable restore flag */
471 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
472
473 /* set the user dpm power limit */
474 if (smu->user_dpm_profile.power_limit) {
475 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
476 if (ret)
477 dev_err(smu->adev->dev, "Failed to set power limit value\n");
478 }
479
480 /* set the user dpm clock configurations */
481 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
482 enum smu_clk_type clk_type;
483
484 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
485 /*
486 * Iterate over smu clk type and force the saved user clk
487 * configs, skip if clock dependency is enabled
488 */
489 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
490 smu->user_dpm_profile.clk_mask[clk_type]) {
491 ret = smu_force_smuclk_levels(smu, clk_type,
492 smu->user_dpm_profile.clk_mask[clk_type]);
493 if (ret)
494 dev_err(smu->adev->dev,
495 "Failed to set clock type = %d\n", clk_type);
496 }
497 }
498 }
499
500 /* set the user dpm fan configurations */
501 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
502 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
503 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
504 if (ret != -EOPNOTSUPP) {
505 smu->user_dpm_profile.fan_speed_pwm = 0;
506 smu->user_dpm_profile.fan_speed_rpm = 0;
507 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
508 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
509 }
510
511 if (smu->user_dpm_profile.fan_speed_pwm) {
512 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
513 if (ret != -EOPNOTSUPP)
514 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
515 }
516
517 if (smu->user_dpm_profile.fan_speed_rpm) {
518 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
519 if (ret != -EOPNOTSUPP)
520 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
521 }
522 }
523
524 /* Restore user customized OD settings */
525 if (smu->user_dpm_profile.user_od) {
526 if (smu->ppt_funcs->restore_user_od_settings) {
527 ret = smu->ppt_funcs->restore_user_od_settings(smu);
528 if (ret)
529 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
530 }
531 }
532
533 /* Disable restore flag */
534 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
535 }
536
537 static int smu_get_power_num_states(void *handle,
538 struct pp_states_info *state_info)
539 {
540 if (!state_info)
541 return -EINVAL;
542
543 /* power states are not supported */
544 memset(state_info, 0, sizeof(struct pp_states_info));
545 state_info->nums = 1;
546 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
547
548 return 0;
549 }
550
551 bool is_support_sw_smu(struct amdgpu_device *adev)
552 {
553 /* vega20 is 11.0.2, but it's supported via the powerplay code */
554 if (adev->asic_type == CHIP_VEGA20)
555 return false;
556
557 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
558 amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
559 return true;
560
561 return false;
562 }
563
564 bool is_support_cclk_dpm(struct amdgpu_device *adev)
565 {
566 struct smu_context *smu = adev->powerplay.pp_handle;
567
568 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
569 return false;
570
571 return true;
572 }
573
574
575 static int smu_sys_get_pp_table(void *handle,
576 char **table)
577 {
578 struct smu_context *smu = handle;
579 struct smu_table_context *smu_table = &smu->smu_table;
580
581 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
582 return -EOPNOTSUPP;
583
584 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
585 return -EINVAL;
586
587 if (smu_table->hardcode_pptable)
588 *table = smu_table->hardcode_pptable;
589 else
590 *table = smu_table->power_play_table;
591
592 return smu_table->power_play_table_size;
593 }
594
595 static int smu_sys_set_pp_table(void *handle,
596 const char *buf,
597 size_t size)
598 {
599 struct smu_context *smu = handle;
600 struct smu_table_context *smu_table = &smu->smu_table;
601 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
602 int ret = 0;
603
604 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
605 return -EOPNOTSUPP;
606
607 if (header->usStructureSize != size) {
608 dev_err(smu->adev->dev, "pp table size not matched !\n");
609 return -EIO;
610 }
611
612 if (!smu_table->hardcode_pptable) {
613 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
614 if (!smu_table->hardcode_pptable)
615 return -ENOMEM;
616 }
617
618 memcpy(smu_table->hardcode_pptable, buf, size);
619 smu_table->power_play_table = smu_table->hardcode_pptable;
620 smu_table->power_play_table_size = size;
621
622 /*
623 * A special hw_fini action (for Navi1x, the DPM disablement will be
624 * skipped) may be needed for custom pptable uploading.
625 */
626 smu->uploading_custom_pp_table = true;
627
628 ret = smu_reset(smu);
629 if (ret)
630 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
631
632 smu->uploading_custom_pp_table = false;
633
634 return ret;
635 }
636
637 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
638 {
639 struct smu_feature *feature = &smu->smu_feature;
640 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
641 int ret = 0;
642
643 /*
644 * With SCPM enabled, setting the allowed feature masks (via
645 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
646 * That means there is no way to let PMFW know the settings below.
647 * Thus, we just assume all the features are allowed in that
648 * scenario.
649 */
650 if (smu->adev->scpm_enabled) {
651 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
652 return 0;
653 }
654
655 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
656
657 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
658 SMU_FEATURE_MAX/32);
659 if (ret)
660 return ret;
661
662 bitmap_or(feature->allowed, feature->allowed,
663 (unsigned long *)allowed_feature_mask,
664 feature->feature_num);
665
666 return ret;
667 }
668
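/*
 * Select the ASIC-specific ppt_funcs implementation based on the MP1
 * (SMU) IP version and set the default overdrive capability.
 */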
669 static int smu_set_funcs(struct amdgpu_device *adev)
670 {
671 struct smu_context *smu = adev->powerplay.pp_handle;
672
673 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
674 smu->od_enabled = true;
675
676 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
677 case IP_VERSION(11, 0, 0):
678 case IP_VERSION(11, 0, 5):
679 case IP_VERSION(11, 0, 9):
680 navi10_set_ppt_funcs(smu);
681 break;
682 case IP_VERSION(11, 0, 7):
683 case IP_VERSION(11, 0, 11):
684 case IP_VERSION(11, 0, 12):
685 case IP_VERSION(11, 0, 13):
686 sienna_cichlid_set_ppt_funcs(smu);
687 break;
688 case IP_VERSION(12, 0, 0):
689 case IP_VERSION(12, 0, 1):
690 renoir_set_ppt_funcs(smu);
691 break;
692 case IP_VERSION(11, 5, 0):
693 vangogh_set_ppt_funcs(smu);
694 break;
695 case IP_VERSION(13, 0, 1):
696 case IP_VERSION(13, 0, 3):
697 case IP_VERSION(13, 0, 8):
698 yellow_carp_set_ppt_funcs(smu);
699 break;
700 case IP_VERSION(13, 0, 4):
701 case IP_VERSION(13, 0, 11):
702 smu_v13_0_4_set_ppt_funcs(smu);
703 break;
704 case IP_VERSION(13, 0, 5):
705 smu_v13_0_5_set_ppt_funcs(smu);
706 break;
707 case IP_VERSION(11, 0, 8):
708 cyan_skillfish_set_ppt_funcs(smu);
709 break;
710 case IP_VERSION(11, 0, 2):
711 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
712 arcturus_set_ppt_funcs(smu);
713 /* OD is not supported on Arcturus */
714 smu->od_enabled = false;
715 break;
716 case IP_VERSION(13, 0, 2):
717 aldebaran_set_ppt_funcs(smu);
718 /* Enable pp_od_clk_voltage node */
719 smu->od_enabled = true;
720 break;
721 case IP_VERSION(13, 0, 0):
722 case IP_VERSION(13, 0, 10):
723 smu_v13_0_0_set_ppt_funcs(smu);
724 break;
725 case IP_VERSION(13, 0, 6):
726 case IP_VERSION(13, 0, 14):
727 smu_v13_0_6_set_ppt_funcs(smu);
728 /* Enable pp_od_clk_voltage node */
729 smu->od_enabled = true;
730 break;
731 case IP_VERSION(13, 0, 7):
732 smu_v13_0_7_set_ppt_funcs(smu);
733 break;
734 case IP_VERSION(14, 0, 0):
735 case IP_VERSION(14, 0, 1):
736 case IP_VERSION(14, 0, 4):
737 smu_v14_0_0_set_ppt_funcs(smu);
738 break;
739 case IP_VERSION(14, 0, 2):
740 case IP_VERSION(14, 0, 3):
741 smu_v14_0_2_set_ppt_funcs(smu);
742 break;
743 default:
744 return -EINVAL;
745 }
746
747 return 0;
748 }
749
750 static int smu_early_init(struct amdgpu_ip_block *ip_block)
751 {
752 struct amdgpu_device *adev = ip_block->adev;
753 struct smu_context *smu;
754 int r;
755
756 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
757 if (!smu)
758 return -ENOMEM;
759
760 smu->adev = adev;
761 smu->pm_enabled = !!amdgpu_dpm;
762 smu->is_apu = false;
763 smu->smu_baco.state = SMU_BACO_STATE_NONE;
764 smu->smu_baco.platform_support = false;
765 smu->smu_baco.maco_support = false;
766 smu->user_dpm_profile.fan_mode = -1;
767 smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
768
769 mutex_init(&smu->message_lock);
770
771 adev->powerplay.pp_handle = smu;
772 adev->powerplay.pp_funcs = &swsmu_pm_funcs;
773
774 r = smu_set_funcs(adev);
775 if (r)
776 return r;
777 return smu_init_microcode(smu);
778 }
779
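/*
 * Build the default DPM clock tables. VCN/JPEG are temporarily ungated
 * (when power gating is supported) so the firmware can report their clock
 * levels, and the previous gating state is restored afterwards.
 */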
780 static int smu_set_default_dpm_table(struct smu_context *smu)
781 {
782 struct amdgpu_device *adev = smu->adev;
783 struct smu_power_context *smu_power = &smu->smu_power;
784 struct smu_power_gate *power_gate = &smu_power->power_gate;
785 int vcn_gate, jpeg_gate;
786 int ret = 0;
787
788 if (!smu->ppt_funcs->set_default_dpm_table)
789 return 0;
790
791 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
792 vcn_gate = atomic_read(&power_gate->vcn_gated);
793 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
794 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
795
796 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
797 ret = smu_dpm_set_vcn_enable(smu, true);
798 if (ret)
799 return ret;
800 }
801
802 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
803 ret = smu_dpm_set_jpeg_enable(smu, true);
804 if (ret)
805 goto err_out;
806 }
807
808 ret = smu->ppt_funcs->set_default_dpm_table(smu);
809 if (ret)
810 dev_err(smu->adev->dev,
811 "Failed to setup default dpm clock tables!\n");
812
813 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
814 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
815 err_out:
816 if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
817 smu_dpm_set_vcn_enable(smu, !vcn_gate);
818
819 return ret;
820 }
821
822 static int smu_apply_default_config_table_settings(struct smu_context *smu)
823 {
824 struct amdgpu_device *adev = smu->adev;
825 int ret = 0;
826
827 ret = smu_get_default_config_table_settings(smu,
828 &adev->pm.config_table);
829 if (ret)
830 return ret;
831
832 return smu_set_config_table(smu, &adev->pm.config_table);
833 }
834
835 static int smu_late_init(struct amdgpu_ip_block *ip_block)
836 {
837 struct amdgpu_device *adev = ip_block->adev;
838 struct smu_context *smu = adev->powerplay.pp_handle;
839 int ret = 0;
840
841 smu_set_fine_grain_gfx_freq_parameters(smu);
842
843 if (!smu->pm_enabled)
844 return 0;
845
846 ret = smu_post_init(smu);
847 if (ret) {
848 dev_err(adev->dev, "Failed to post smu init!\n");
849 return ret;
850 }
851
852 /*
853 * Explicitly notify PMFW of the power mode the system is in, since
854 * PMFW may have booted the ASIC with a different mode.
855 * For ASICs supporting AC/DC switch via gpio, PMFW will
856 * handle the switch automatically. Driver involvement
857 * is unnecessary.
858 */
859 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
860 smu_set_ac_dc(smu);
861
862 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
863 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
864 return 0;
865
866 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
867 ret = smu_set_default_od_settings(smu);
868 if (ret) {
869 dev_err(adev->dev, "Failed to setup default OD settings!\n");
870 return ret;
871 }
872 }
873
874 ret = smu_populate_umd_state_clk(smu);
875 if (ret) {
876 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
877 return ret;
878 }
879
880 ret = smu_get_asic_power_limits(smu,
881 &smu->current_power_limit,
882 &smu->default_power_limit,
883 &smu->max_power_limit,
884 &smu->min_power_limit);
885 if (ret) {
886 dev_err(adev->dev, "Failed to get asic power limits!\n");
887 return ret;
888 }
889
890 if (!amdgpu_sriov_vf(adev))
891 smu_get_unique_id(smu);
892
893 smu_get_fan_parameters(smu);
894
895 smu_handle_task(smu,
896 smu->smu_dpm.dpm_level,
897 AMD_PP_TASK_COMPLETE_INIT);
898
899 ret = smu_apply_default_config_table_settings(smu);
900 if (ret && (ret != -EOPNOTSUPP)) {
901 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
902 return ret;
903 }
904
905 smu_restore_dpm_user_profile(smu);
906
907 return 0;
908 }
909
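/*
 * Allocate the buffer objects backing the SMU tables: one BO for the tool
 * (PMSTATUSLOG) table and one shared driver table BO sized to the largest
 * of the remaining tables.
 */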
910 static int smu_init_fb_allocations(struct smu_context *smu)
911 {
912 struct amdgpu_device *adev = smu->adev;
913 struct smu_table_context *smu_table = &smu->smu_table;
914 struct smu_table *tables = smu_table->tables;
915 struct smu_table *driver_table = &(smu_table->driver_table);
916 uint32_t max_table_size = 0;
917 int ret, i;
918
919 /* VRAM allocation for tool table */
920 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
921 ret = amdgpu_bo_create_kernel(adev,
922 tables[SMU_TABLE_PMSTATUSLOG].size,
923 tables[SMU_TABLE_PMSTATUSLOG].align,
924 tables[SMU_TABLE_PMSTATUSLOG].domain,
925 &tables[SMU_TABLE_PMSTATUSLOG].bo,
926 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
927 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
928 if (ret) {
929 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
930 return ret;
931 }
932 }
933
934 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
935 /* VRAM allocation for driver table */
936 for (i = 0; i < SMU_TABLE_COUNT; i++) {
937 if (tables[i].size == 0)
938 continue;
939
940 /* If one of the tables has VRAM domain restriction, keep it in
941 * VRAM
942 */
943 if ((tables[i].domain &
944 (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
945 AMDGPU_GEM_DOMAIN_VRAM)
946 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
947
948 if (i == SMU_TABLE_PMSTATUSLOG)
949 continue;
950
951 if (max_table_size < tables[i].size)
952 max_table_size = tables[i].size;
953 }
954
955 driver_table->size = max_table_size;
956 driver_table->align = PAGE_SIZE;
957
958 ret = amdgpu_bo_create_kernel(adev,
959 driver_table->size,
960 driver_table->align,
961 driver_table->domain,
962 &driver_table->bo,
963 &driver_table->mc_address,
964 &driver_table->cpu_addr);
965 if (ret) {
966 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
967 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
968 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
969 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
970 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
971 }
972
973 return ret;
974 }
975
976 static int smu_fini_fb_allocations(struct smu_context *smu)
977 {
978 struct smu_table_context *smu_table = &smu->smu_table;
979 struct smu_table *tables = smu_table->tables;
980 struct smu_table *driver_table = &(smu_table->driver_table);
981
982 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
983 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
984 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
985 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
986
987 amdgpu_bo_free_kernel(&driver_table->bo,
988 &driver_table->mc_address,
989 &driver_table->cpu_addr);
990
991 return 0;
992 }
993
994 /**
995 * smu_alloc_memory_pool - allocate memory pool in the system memory
996 *
997 * @smu: smu_context pointer
998 *
999 * This memory pool is used by the SMC. Its location is communicated to the
1000 * SMC via the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
1001 *
1002 * Returns 0 on success, error on failure.
1003 */
1004 static int smu_alloc_memory_pool(struct smu_context *smu)
1005 {
1006 struct amdgpu_device *adev = smu->adev;
1007 struct smu_table_context *smu_table = &smu->smu_table;
1008 struct smu_table *memory_pool = &smu_table->memory_pool;
1009 uint64_t pool_size = smu->pool_size;
1010 int ret = 0;
1011
1012 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1013 return ret;
1014
1015 memory_pool->size = pool_size;
1016 memory_pool->align = PAGE_SIZE;
1017 memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1018
1019 switch (pool_size) {
1020 case SMU_MEMORY_POOL_SIZE_256_MB:
1021 case SMU_MEMORY_POOL_SIZE_512_MB:
1022 case SMU_MEMORY_POOL_SIZE_1_GB:
1023 case SMU_MEMORY_POOL_SIZE_2_GB:
1024 ret = amdgpu_bo_create_kernel(adev,
1025 memory_pool->size,
1026 memory_pool->align,
1027 memory_pool->domain,
1028 &memory_pool->bo,
1029 &memory_pool->mc_address,
1030 &memory_pool->cpu_addr);
1031 if (ret)
1032 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
1033 break;
1034 default:
1035 break;
1036 }
1037
1038 return ret;
1039 }
1040
1041 static int smu_free_memory_pool(struct smu_context *smu)
1042 {
1043 struct smu_table_context *smu_table = &smu->smu_table;
1044 struct smu_table *memory_pool = &smu_table->memory_pool;
1045
1046 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1047 return 0;
1048
1049 amdgpu_bo_free_kernel(&memory_pool->bo,
1050 &memory_pool->mc_address,
1051 &memory_pool->cpu_addr);
1052
1053 memset(memory_pool, 0, sizeof(struct smu_table));
1054
1055 return 0;
1056 }
1057
1058 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1059 {
1060 struct smu_table_context *smu_table = &smu->smu_table;
1061 struct smu_table *dummy_read_1_table =
1062 &smu_table->dummy_read_1_table;
1063 struct amdgpu_device *adev = smu->adev;
1064 int ret = 0;
1065
1066 if (!dummy_read_1_table->size)
1067 return 0;
1068
1069 ret = amdgpu_bo_create_kernel(adev,
1070 dummy_read_1_table->size,
1071 dummy_read_1_table->align,
1072 dummy_read_1_table->domain,
1073 &dummy_read_1_table->bo,
1074 &dummy_read_1_table->mc_address,
1075 &dummy_read_1_table->cpu_addr);
1076 if (ret)
1077 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1078
1079 return ret;
1080 }
1081
1082 static void smu_free_dummy_read_table(struct smu_context *smu)
1083 {
1084 struct smu_table_context *smu_table = &smu->smu_table;
1085 struct smu_table *dummy_read_1_table =
1086 &smu_table->dummy_read_1_table;
1087
1088
1089 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1090 &dummy_read_1_table->mc_address,
1091 &dummy_read_1_table->cpu_addr);
1092
1093 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1094 }
1095
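/*
 * Software-side setup: SMC table structures, power context, table BOs,
 * memory pool, dummy read table and the SMU i2c buses.
 */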
1096 static int smu_smc_table_sw_init(struct smu_context *smu)
1097 {
1098 int ret;
1099
1100 /**
1101 * Create the smu_table structure, and init smc tables such as
1102 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1103 */
1104 ret = smu_init_smc_tables(smu);
1105 if (ret) {
1106 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1107 return ret;
1108 }
1109
1110 /**
1111 * Create the smu_power_context structure, and allocate the smu_dpm_context
1112 * and other contexts that fill in the smu_power_context data.
1113 */
1114 ret = smu_init_power(smu);
1115 if (ret) {
1116 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
1117 return ret;
1118 }
1119
1120 /*
1121 * allocate vram bos to store smc table contents.
1122 */
1123 ret = smu_init_fb_allocations(smu);
1124 if (ret)
1125 return ret;
1126
1127 ret = smu_alloc_memory_pool(smu);
1128 if (ret)
1129 return ret;
1130
1131 ret = smu_alloc_dummy_read_table(smu);
1132 if (ret)
1133 return ret;
1134
1135 ret = smu_i2c_init(smu);
1136 if (ret)
1137 return ret;
1138
1139 return 0;
1140 }
1141
1142 static int smu_smc_table_sw_fini(struct smu_context *smu)
1143 {
1144 int ret;
1145
1146 smu_i2c_fini(smu);
1147
1148 smu_free_dummy_read_table(smu);
1149
1150 ret = smu_free_memory_pool(smu);
1151 if (ret)
1152 return ret;
1153
1154 ret = smu_fini_fb_allocations(smu);
1155 if (ret)
1156 return ret;
1157
1158 ret = smu_fini_power(smu);
1159 if (ret) {
1160 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
1161 return ret;
1162 }
1163
1164 ret = smu_fini_smc_tables(smu);
1165 if (ret) {
1166 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
1167 return ret;
1168 }
1169
1170 return 0;
1171 }
1172
1173 static void smu_throttling_logging_work_fn(struct work_struct *work)
1174 {
1175 struct smu_context *smu = container_of(work, struct smu_context,
1176 throttling_logging_work);
1177
1178 smu_log_thermal_throttling(smu);
1179 }
1180
1181 static void smu_interrupt_work_fn(struct work_struct *work)
1182 {
1183 struct smu_context *smu = container_of(work, struct smu_context,
1184 interrupt_work);
1185
1186 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1187 smu->ppt_funcs->interrupt_work(smu);
1188 }
1189
1190 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1191 {
1192 struct smu_context *smu =
1193 container_of(work, struct smu_context, swctf_delayed_work.work);
1194 struct smu_temperature_range *range =
1195 &smu->thermal_range;
1196 struct amdgpu_device *adev = smu->adev;
1197 uint32_t hotspot_tmp, size;
1198
1199 /*
1200 * If the hotspot temperature is confirmed to be below the SW CTF setting point
1201 * after the enforced delay, nothing will be done.
1202 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1203 */
1204 if (range->software_shutdown_temp &&
1205 smu->ppt_funcs->read_sensor &&
1206 !smu->ppt_funcs->read_sensor(smu,
1207 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1208 &hotspot_tmp,
1209 &size) &&
1210 hotspot_tmp / 1000 < range->software_shutdown_temp)
1211 return;
1212
1213 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1214 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1215 orderly_poweroff(true);
1216 }
1217
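/*
 * Initialize the XGMI per-link power down (PLPD) policy: report
 * XGMI_PLPD_DEFAULT as the current level when PMFW enables the feature
 * (or on SMU 11.0.2), otherwise drop PLPD from the supported policy mask.
 */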
1218 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1219 {
1220 struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1221 struct smu_dpm_policy_ctxt *policy_ctxt;
1222 struct smu_dpm_policy *policy;
1223
1224 policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1225 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1226 if (policy)
1227 policy->current_level = XGMI_PLPD_DEFAULT;
1228 return;
1229 }
1230
1231 /* PMFW put PLPD into default policy after enabling the feature */
1232 if (smu_feature_is_enabled(smu,
1233 SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1234 if (policy)
1235 policy->current_level = XGMI_PLPD_DEFAULT;
1236 } else {
1237 policy_ctxt = dpm_ctxt->dpm_policies;
1238 if (policy_ctxt)
1239 policy_ctxt->policy_mask &=
1240 ~BIT(PP_PM_POLICY_XGMI_PLPD);
1241 }
1242 }
1243
1244 static bool smu_is_workload_profile_available(struct smu_context *smu,
1245 u32 profile)
1246 {
1247 if (profile >= PP_SMC_POWER_PROFILE_COUNT)
1248 return false;
1249 return smu->workload_map && smu->workload_map[profile].valid_mapping;
1250 }
1251
1252 static void smu_init_power_profile(struct smu_context *smu)
1253 {
1254 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN) {
1255 if (smu->is_apu ||
1256 !smu_is_workload_profile_available(
1257 smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
1258 smu->power_profile_mode =
1259 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1260 else
1261 smu->power_profile_mode =
1262 PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1263 }
1264 smu_power_profile_mode_get(smu, smu->power_profile_mode);
1265 }
1266
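/*
 * IP block .sw_init callback: initialize work items, gating state, the
 * default power profile and the software-side SMC tables, then parse the
 * vbios bootup values and register the SMU interrupt handler.
 */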
1267 static int smu_sw_init(struct amdgpu_ip_block *ip_block)
1268 {
1269 struct amdgpu_device *adev = ip_block->adev;
1270 struct smu_context *smu = adev->powerplay.pp_handle;
1271 int ret;
1272
1273 smu->pool_size = adev->pm.smu_prv_buffer_size;
1274 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1275 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1276 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1277
1278 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1279 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1280 atomic64_set(&smu->throttle_int_counter, 0);
1281 smu->watermarks_bitmap = 0;
1282
1283 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
1284 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1285 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1286 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1287
1288 smu_init_power_profile(smu);
1289 smu->display_config = &adev->pm.pm_display_cfg;
1290
1291 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1292 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1293
1294 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1295 smu_swctf_delayed_work_handler);
1296
1297 ret = smu_smc_table_sw_init(smu);
1298 if (ret) {
1299 dev_err(adev->dev, "Failed to sw init smc table!\n");
1300 return ret;
1301 }
1302
1303 /* get boot_values from vbios to set revision, gfxclk, etc. */
1304 ret = smu_get_vbios_bootup_values(smu);
1305 if (ret) {
1306 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1307 return ret;
1308 }
1309
1310 ret = smu_init_pptable_microcode(smu);
1311 if (ret) {
1312 dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1313 return ret;
1314 }
1315
1316 ret = smu_register_irq_handler(smu);
1317 if (ret) {
1318 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1319 return ret;
1320 }
1321
1322 /* If there is no way to query fan control mode, fan control is not supported */
1323 if (!smu->ppt_funcs->get_fan_control_mode)
1324 smu->adev->pm.no_fan = true;
1325
1326 return 0;
1327 }
1328
1329 static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
1330 {
1331 struct amdgpu_device *adev = ip_block->adev;
1332 struct smu_context *smu = adev->powerplay.pp_handle;
1333 int ret;
1334
1335 ret = smu_smc_table_sw_fini(smu);
1336 if (ret) {
1337 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1338 return ret;
1339 }
1340
1341 if (smu->custom_profile_params) {
1342 kfree(smu->custom_profile_params);
1343 smu->custom_profile_params = NULL;
1344 }
1345
1346 smu_fini_microcode(smu);
1347
1348 return 0;
1349 }
1350
1351 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1352 {
1353 struct amdgpu_device *adev = smu->adev;
1354 struct smu_temperature_range *range =
1355 &smu->thermal_range;
1356 int ret = 0;
1357
1358 if (!smu->ppt_funcs->get_thermal_temperature_range)
1359 return 0;
1360
1361 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1362 if (ret)
1363 return ret;
1364
1365 adev->pm.dpm.thermal.min_temp = range->min;
1366 adev->pm.dpm.thermal.max_temp = range->max;
1367 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1368 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1369 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1370 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1371 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1372 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1373 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1374
1375 return ret;
1376 }
1377
1378 /**
1379 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1380 *
1381 * @smu: smu_context pointer
1382 *
1383 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1384 * Returns 0 on success, error on failure.
1385 */
1386 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1387 {
1388 struct wbrf_ranges_in_out wbrf_exclusion = {0};
1389 struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1390 struct amdgpu_device *adev = smu->adev;
1391 uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1392 uint64_t start, end;
1393 int ret, i, j;
1394
1395 ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1396 if (ret) {
1397 dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1398 return ret;
1399 }
1400
1401 /*
1402 * The exclusion ranges array we got might be filled with holes and duplicate
1403 * entries. For example:
1404 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1405 * We need to do some sorting to eliminate those holes and duplicate entries.
1406 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1407 */
1408 for (i = 0; i < num_of_wbrf_ranges; i++) {
1409 start = wifi_bands[i].start;
1410 end = wifi_bands[i].end;
1411
1412 /* get the last valid entry to fill the intermediate hole */
1413 if (!start && !end) {
1414 for (j = num_of_wbrf_ranges - 1; j > i; j--)
1415 if (wifi_bands[j].start && wifi_bands[j].end)
1416 break;
1417
1418 /* no valid entry left */
1419 if (j <= i)
1420 break;
1421
1422 start = wifi_bands[i].start = wifi_bands[j].start;
1423 end = wifi_bands[i].end = wifi_bands[j].end;
1424 wifi_bands[j].start = 0;
1425 wifi_bands[j].end = 0;
1426 num_of_wbrf_ranges = j;
1427 }
1428
1429 /* eliminate duplicate entries */
1430 for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1431 if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1432 wifi_bands[j].start = 0;
1433 wifi_bands[j].end = 0;
1434 }
1435 }
1436 }
1437
1438 /* Send the sorted wifi_bands to PMFW */
1439 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1440 /* Try to set the wifi_bands again */
1441 if (unlikely(ret == -EBUSY)) {
1442 mdelay(5);
1443 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1444 }
1445
1446 return ret;
1447 }
1448
1449 /**
1450 * smu_wbrf_event_handler - handle notify events
1451 *
1452 * @nb: notifier block
1453 * @action: event type
1454 * @_arg: event data
1455 *
1456 * Calls relevant amdgpu function in response to wbrf event
1457 * notification from kernel.
1458 */
1459 static int smu_wbrf_event_handler(struct notifier_block *nb,
1460 unsigned long action, void *_arg)
1461 {
1462 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1463
1464 switch (action) {
1465 case WBRF_CHANGED:
1466 schedule_delayed_work(&smu->wbrf_delayed_work,
1467 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1468 break;
1469 default:
1470 return NOTIFY_DONE;
1471 }
1472
1473 return NOTIFY_OK;
1474 }
1475
1476 /**
1477 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1478 *
1479 * @work: struct work_struct pointer
1480 *
1481 * Flood is over and driver will consume the latest exclusion ranges.
1482 */
1483 static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1484 {
1485 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1486
1487 smu_wbrf_handle_exclusion_ranges(smu);
1488 }
1489
1490 /**
1491 * smu_wbrf_support_check - check wbrf support
1492 *
1493 * @smu: smu_context pointer
1494 *
1495 * Verifies the ACPI interface whether wbrf is supported.
1496 */
1497 static void smu_wbrf_support_check(struct smu_context *smu)
1498 {
1499 struct amdgpu_device *adev = smu->adev;
1500
1501 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1502 acpi_amd_wbrf_supported_consumer(adev->dev);
1503
1504 if (smu->wbrf_supported)
1505 dev_info(adev->dev, "RF interference mitigation is supported\n");
1506 }
1507
1508 /**
1509 * smu_wbrf_init - init driver wbrf support
1510 *
1511 * @smu: smu_context pointer
1512 *
1513 * Verifies the AMD ACPI interfaces and registers with the wbrf
1514 * notifier chain if wbrf feature is supported.
1515 * Returns 0 on success, error on failure.
1516 */
1517 static int smu_wbrf_init(struct smu_context *smu)
1518 {
1519 int ret;
1520
1521 if (!smu->wbrf_supported)
1522 return 0;
1523
1524 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1525
1526 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1527 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1528 if (ret)
1529 return ret;
1530
1531 /*
1532 * Some wifi band exclusion ranges may already be in place
1533 * before our driver is loaded. Schedule a run so that our driver
1534 * is aware of those exclusion ranges.
1535 */
1536 schedule_delayed_work(&smu->wbrf_delayed_work,
1537 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1538
1539 return 0;
1540 }
1541
1542 /**
1543 * smu_wbrf_fini - tear down driver wbrf support
1544 *
1545 * @smu: smu_context pointer
1546 *
1547 * Unregisters with the wbrf notifier chain.
1548 */
1549 static void smu_wbrf_fini(struct smu_context *smu)
1550 {
1551 if (!smu->wbrf_supported)
1552 return;
1553
1554 amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1555
1556 cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1557 }
1558
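/*
 * Main hardware setup sequence: program table/tool/pool addresses, upload
 * the pptable (when SCPM is disabled), enable the allowed DPM features and
 * thermal alerts, and apply the PCIe and deep-sleep parameters.
 */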
1559 static int smu_smc_hw_setup(struct smu_context *smu)
1560 {
1561 struct smu_feature *feature = &smu->smu_feature;
1562 struct amdgpu_device *adev = smu->adev;
1563 uint8_t pcie_gen = 0, pcie_width = 0;
1564 uint64_t features_supported;
1565 int ret = 0;
1566
1567 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1568 case IP_VERSION(11, 0, 7):
1569 case IP_VERSION(11, 0, 11):
1570 case IP_VERSION(11, 5, 0):
1571 case IP_VERSION(11, 0, 12):
1572 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1573 dev_info(adev->dev, "dpm has been enabled\n");
1574 ret = smu_system_features_control(smu, true);
1575 if (ret)
1576 dev_err(adev->dev, "Failed system features control!\n");
1577 return ret;
1578 }
1579 break;
1580 default:
1581 break;
1582 }
1583
1584 ret = smu_init_display_count(smu, 0);
1585 if (ret) {
1586 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1587 return ret;
1588 }
1589
1590 ret = smu_set_driver_table_location(smu);
1591 if (ret) {
1592 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1593 return ret;
1594 }
1595
1596 /*
1597 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1598 */
1599 ret = smu_set_tool_table_location(smu);
1600 if (ret) {
1601 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1602 return ret;
1603 }
1604
1605 /*
1606 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to notify
1607 * the SMC of the memory pool location.
1608 */
1609 ret = smu_notify_memory_pool_location(smu);
1610 if (ret) {
1611 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1612 return ret;
1613 }
1614
1615 /*
1616 * It is assumed the pptable used before runpm is the same as
1617 * the one used afterwards. Thus, we can reuse the stored
1618 * copy and do not need to set up the pptable again.
1619 */
1620 if (!adev->in_runpm) {
1621 ret = smu_setup_pptable(smu);
1622 if (ret) {
1623 dev_err(adev->dev, "Failed to setup pptable!\n");
1624 return ret;
1625 }
1626 }
1627
1628 /* smu_dump_pptable(smu); */
1629
1630 /*
1631 * With SCPM enabled, PSP is responsible for transferring the PPTable
1632 * to the SMU. Driver involvement is neither needed nor permitted.
1633 */
1634 if (!adev->scpm_enabled) {
1635 /*
1636 * Copy pptable bo in the vram to smc with SMU MSGs such as
1637 * SetDriverDramAddr and TransferTableDram2Smu.
1638 */
1639 ret = smu_write_pptable(smu);
1640 if (ret) {
1641 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1642 return ret;
1643 }
1644 }
1645
1646 /* issue Run*Btc msg */
1647 ret = smu_run_btc(smu);
1648 if (ret)
1649 return ret;
1650
1651 /* Enable UclkShadow on wbrf supported */
1652 if (smu->wbrf_supported) {
1653 ret = smu_enable_uclk_shadow(smu, true);
1654 if (ret) {
1655 dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1656 return ret;
1657 }
1658 }
1659
1660 /*
1661 * With SCPM enabled, these actions (and the relevant messages) are
1662 * neither needed nor permitted.
1663 */
1664 if (!adev->scpm_enabled) {
1665 ret = smu_feature_set_allowed_mask(smu);
1666 if (ret) {
1667 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1668 return ret;
1669 }
1670 }
1671
1672 ret = smu_system_features_control(smu, true);
1673 if (ret) {
1674 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1675 return ret;
1676 }
1677
1678 smu_init_xgmi_plpd_mode(smu);
1679
1680 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1681 if (ret) {
1682 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1683 return ret;
1684 }
1685 bitmap_copy(feature->supported,
1686 (unsigned long *)&features_supported,
1687 feature->feature_num);
1688
1689 if (!smu_is_dpm_running(smu))
1690 dev_info(adev->dev, "dpm has been disabled\n");
1691
1692 /*
1693 * Set initial values (retrieved from the vbios) in the dpm tables context,
1694 * such as gfxclk, memclk, dcefclk, etc. And enable the DPM feature for each
1695 * type of clock.
1696 */
1697 ret = smu_set_default_dpm_table(smu);
1698 if (ret) {
1699 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1700 return ret;
1701 }
1702
1703 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
1704 pcie_gen = 4;
1705 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1706 pcie_gen = 3;
1707 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1708 pcie_gen = 2;
1709 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1710 pcie_gen = 1;
1711 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1712 pcie_gen = 0;
1713
1714 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1715 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1716 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
1717 */
1718 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
1719 pcie_width = 7;
1720 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1721 pcie_width = 6;
1722 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1723 pcie_width = 5;
1724 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1725 pcie_width = 4;
1726 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1727 pcie_width = 3;
1728 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1729 pcie_width = 2;
1730 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1731 pcie_width = 1;
1732 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1733 if (ret) {
1734 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1735 return ret;
1736 }
1737
1738 ret = smu_get_thermal_temperature_range(smu);
1739 if (ret) {
1740 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1741 return ret;
1742 }
1743
1744 ret = smu_enable_thermal_alert(smu);
1745 if (ret) {
1746 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1747 return ret;
1748 }
1749
1750 ret = smu_notify_display_change(smu);
1751 if (ret) {
1752 dev_err(adev->dev, "Failed to notify display change!\n");
1753 return ret;
1754 }
1755
1756 /*
1757 * Set min deep sleep dce fclk with bootup value from vbios via
1758 * SetMinDeepSleepDcefclk MSG.
1759 */
1760 ret = smu_set_min_dcef_deep_sleep(smu,
1761 smu->smu_table.boot_values.dcefclk / 100);
1762 if (ret) {
1763 dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1764 return ret;
1765 }
1766
1767 /* Init wbrf support. Properly setup the notifier */
1768 ret = smu_wbrf_init(smu);
1769 if (ret)
1770 dev_err(adev->dev, "Error during wbrf init call\n");
1771
1772 return ret;
1773 }
1774
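/*
 * Bring up the SMC: for non-PSP firmware loading on pre-SMU11 parts, load
 * the SMU microcode directly, then check that the firmware is ready and
 * verify the driver interface version.
 */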
1775 static int smu_start_smc_engine(struct smu_context *smu)
1776 {
1777 struct amdgpu_device *adev = smu->adev;
1778 int ret = 0;
1779
1780 smu->smc_fw_state = SMU_FW_INIT;
1781
1782 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1783 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1784 if (smu->ppt_funcs->load_microcode) {
1785 ret = smu->ppt_funcs->load_microcode(smu);
1786 if (ret)
1787 return ret;
1788 }
1789 }
1790 }
1791
1792 if (smu->ppt_funcs->check_fw_status) {
1793 ret = smu->ppt_funcs->check_fw_status(smu);
1794 if (ret) {
1795 dev_err(adev->dev, "SMC is not ready\n");
1796 return ret;
1797 }
1798 }
1799
1800 /*
1801 * Send msg GetDriverIfVersion to check if the return value matches
1802 * the DRIVER_IF_VERSION in the smc header.
1803 */
1804 ret = smu_check_fw_version(smu);
1805 if (ret)
1806 return ret;
1807
1808 return ret;
1809 }
1810
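/*
 * IP block .hw_init callback: start the SMC engine, check wbrf support,
 * power up the APU multimedia blocks, and run the full SMC hardware setup.
 */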
1811 static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1812 {
1813 int ret;
1814 struct amdgpu_device *adev = ip_block->adev;
1815 struct smu_context *smu = adev->powerplay.pp_handle;
1816
1817 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1818 smu->pm_enabled = false;
1819 return 0;
1820 }
1821
1822 ret = smu_start_smc_engine(smu);
1823 if (ret) {
1824 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1825 return ret;
1826 }
1827
1828 /*
1829 * Check whether wbrf is supported. This needs to be done
1830 * before SMU setup starts since part of SMU configuration
1831 * relies on this.
1832 */
1833 smu_wbrf_support_check(smu);
1834
1835 if (smu->is_apu) {
1836 ret = smu_set_gfx_imu_enable(smu);
1837 if (ret)
1838 return ret;
1839 smu_dpm_set_vcn_enable(smu, true);
1840 smu_dpm_set_jpeg_enable(smu, true);
1841 smu_dpm_set_vpe_enable(smu, true);
1842 smu_dpm_set_umsch_mm_enable(smu, true);
1843 smu_set_mall_enable(smu);
1844 smu_set_gfx_cgpg(smu, true);
1845 }
1846
1847 if (!smu->pm_enabled)
1848 return 0;
1849
1850 ret = smu_get_driver_allowed_feature_mask(smu);
1851 if (ret)
1852 return ret;
1853
1854 ret = smu_smc_hw_setup(smu);
1855 if (ret) {
1856 dev_err(adev->dev, "Failed to setup smc hw!\n");
1857 return ret;
1858 }
1859
1860 /*
1861 * Retrieve the maximum sustainable clocks here because:
1862 * 1. It is not needed on resume (from S3).
1863 * 2. DAL settings come between .hw_init and .late_init of SMU,
1864 * and DAL needs to know the maximum sustainable clocks. Thus
1865 * it cannot be put in .late_init().
1866 */
1867 ret = smu_init_max_sustainable_clocks(smu);
1868 if (ret) {
1869 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1870 return ret;
1871 }
1872
1873 adev->pm.dpm_enabled = true;
1874
1875 dev_info(adev->dev, "SMU is initialized successfully!\n");
1876
1877 return 0;
1878 }
1879
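/*
 * Decide how DPM features get disabled on teardown: skip entirely where
 * PMFW handles it (newer ASICs, custom pptable upload on Navi1x, BACO
 * entry on selected ASICs), keep only BACO enabled when the teardown
 * path relies on it, otherwise disable all SMU features. Finally notify
 * the RLC and stop it where required.
 */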
1880 static int smu_disable_dpms(struct smu_context *smu)
1881 {
1882 struct amdgpu_device *adev = smu->adev;
1883 int ret = 0;
1884 bool use_baco = !smu->is_apu &&
1885 ((amdgpu_in_reset(adev) &&
1886 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1887 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1888
1889 /*
1890 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
1891 * properly on suspend/reset/unload. Driver involvement may cause unexpected issues.
1892 */
1893 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1894 case IP_VERSION(13, 0, 0):
1895 case IP_VERSION(13, 0, 7):
1896 case IP_VERSION(13, 0, 10):
1897 case IP_VERSION(14, 0, 2):
1898 case IP_VERSION(14, 0, 3):
1899 return 0;
1900 default:
1901 break;
1902 }
1903
1904 /*
1905 * For custom pptable uploading, skip the DPM features
1906 * disable process on Navi1x ASICs.
1907 *   - The gfx related features are under the control of
1908 *     the RLC on those ASICs, so RLC reinitialization would
1909 *     be needed to re-enable them, which would take much
1910 *     more effort.
1911 *
1912 *   - SMU firmware can handle the DPM re-enablement
1913 *     properly.
1914 */
1915 if (smu->uploading_custom_pp_table) {
1916 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1917 case IP_VERSION(11, 0, 0):
1918 case IP_VERSION(11, 0, 5):
1919 case IP_VERSION(11, 0, 9):
1920 case IP_VERSION(11, 0, 7):
1921 case IP_VERSION(11, 0, 11):
1922 case IP_VERSION(11, 5, 0):
1923 case IP_VERSION(11, 0, 12):
1924 case IP_VERSION(11, 0, 13):
1925 return 0;
1926 default:
1927 break;
1928 }
1929 }
1930
1931 /*
1932 * For Sienna_Cichlid, PMFW will handle the feature disablement properly
1933 * on BACO entry. Driver involvement is unnecessary.
1934 */
1935 if (use_baco) {
1936 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1937 case IP_VERSION(11, 0, 7):
1938 case IP_VERSION(11, 0, 0):
1939 case IP_VERSION(11, 0, 5):
1940 case IP_VERSION(11, 0, 9):
1941 case IP_VERSION(13, 0, 7):
1942 return 0;
1943 default:
1944 break;
1945 }
1946 }
1947
1948 /*
1949 * For GFX11 and subsequent APUs, PMFW will handle the feature disablement properly
1950 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1951 */
1952 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
1953 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
1954 return 0;
1955
1956 /*
1957 * For gpu reset, runpm and hibernation through BACO,
1958 * BACO feature has to be kept enabled.
1959 */
1960 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1961 ret = smu_disable_all_features_with_exception(smu,
1962 SMU_FEATURE_BACO_BIT);
1963 if (ret)
1964 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1965 } else {
1966 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1967 if (!adev->scpm_enabled) {
1968 ret = smu_system_features_control(smu, false);
1969 if (ret)
1970 dev_err(adev->dev, "Failed to disable smu features.\n");
1971 }
1972 }
1973
1974 /* Notify the SMU that the RLC is going to be off and stop RLC/SMU interaction;
1975 * otherwise the SMU will hang while interacting with the RLC if the RLC is halted.
1976 * This is a workaround for the Vangogh ASIC which fixes the SMU hang issue.
1977 */
1978 ret = smu_notify_rlc_state(smu, false);
1979 if (ret) {
1980 dev_err(adev->dev, "Fail to notify rlc status!\n");
1981 return ret;
1982 }
1983
1984 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1985 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1986 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1987 adev->gfx.rlc.funcs->stop(adev);
1988
1989 return ret;
1990 }
1991
1992 static int smu_smc_hw_cleanup(struct smu_context *smu)
1993 {
1994 struct amdgpu_device *adev = smu->adev;
1995 int ret = 0;
1996
1997 smu_wbrf_fini(smu);
1998
1999 cancel_work_sync(&smu->throttling_logging_work);
2000 cancel_work_sync(&smu->interrupt_work);
2001
2002 ret = smu_disable_thermal_alert(smu);
2003 if (ret) {
2004 dev_err(adev->dev, "Fail to disable thermal alert!\n");
2005 return ret;
2006 }
2007
2008 cancel_delayed_work_sync(&smu->swctf_delayed_work);
2009
2010 ret = smu_disable_dpms(smu);
2011 if (ret) {
2012 dev_err(adev->dev, "Fail to disable dpm features!\n");
2013 return ret;
2014 }
2015
2016 return 0;
2017 }
2018
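/*
 * On SMU 13.0.10 parts without display hardware, request the MP1 UNLOAD
 * state on a plain driver unload (not runtime PM, suspend or reset).
 */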
2019 static int smu_reset_mp1_state(struct smu_context *smu)
2020 {
2021 struct amdgpu_device *adev = smu->adev;
2022 int ret = 0;
2023
2024 if ((!adev->in_runpm) && (!adev->in_suspend) &&
2025 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2026 IP_VERSION(13, 0, 10) &&
2027 !amdgpu_device_has_display_hardware(adev))
2028 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2029
2030 return ret;
2031 }
2032
2033 static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2034 {
2035 struct amdgpu_device *adev = ip_block->adev;
2036 struct smu_context *smu = adev->powerplay.pp_handle;
2037 int ret;
2038
2039 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2040 return 0;
2041
2042 smu_dpm_set_vcn_enable(smu, false);
2043 smu_dpm_set_jpeg_enable(smu, false);
2044 smu_dpm_set_vpe_enable(smu, false);
2045 smu_dpm_set_umsch_mm_enable(smu, false);
2046
2047 adev->vcn.cur_state = AMD_PG_STATE_GATE;
2048 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2049
2050 if (!smu->pm_enabled)
2051 return 0;
2052
2053 adev->pm.dpm_enabled = false;
2054
2055 ret = smu_smc_hw_cleanup(smu);
2056 if (ret)
2057 return ret;
2058
2059 ret = smu_reset_mp1_state(smu);
2060 if (ret)
2061 return ret;
2062
2063 return 0;
2064 }
2065
2066 static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2067 {
2068 struct amdgpu_device *adev = ip_block->adev;
2069 struct smu_context *smu = adev->powerplay.pp_handle;
2070
2071 kfree(smu);
2072 }
2073
2074 static int smu_reset(struct smu_context *smu)
2075 {
2076 struct amdgpu_device *adev = smu->adev;
2077 struct amdgpu_ip_block *ip_block;
2078 int ret;
2079
2080 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2081 if (!ip_block)
2082 return -EINVAL;
2083
2084 ret = smu_hw_fini(ip_block);
2085 if (ret)
2086 return ret;
2087
2088 ret = smu_hw_init(ip_block);
2089 if (ret)
2090 return ret;
2091
2092 ret = smu_late_init(ip_block);
2093 if (ret)
2094 return ret;
2095
2096 return 0;
2097 }
2098
2099 static int smu_suspend(struct amdgpu_ip_block *ip_block)
2100 {
2101 struct amdgpu_device *adev = ip_block->adev;
2102 struct smu_context *smu = adev->powerplay.pp_handle;
2103 int ret;
2104 uint64_t count;
2105
2106 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2107 return 0;
2108
2109 if (!smu->pm_enabled)
2110 return 0;
2111
2112 adev->pm.dpm_enabled = false;
2113
2114 ret = smu_smc_hw_cleanup(smu);
2115 if (ret)
2116 return ret;
2117
2118 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2119
2120 smu_set_gfx_cgpg(smu, false);
2121
2122 /*
2123 * PMFW resets the entry count when the device is suspended, so we save
2124 * the last value to be used when we resume, to keep it consistent
2125 */
2126 ret = smu_get_entrycount_gfxoff(smu, &count);
2127 if (!ret)
2128 adev->gfx.gfx_off_entrycount = count;
2129
2130 /* clear this on suspend so it will get reprogrammed on resume */
2131 smu->workload_mask = 0;
2132
2133 return 0;
2134 }
2135
2136 static int smu_resume(struct amdgpu_ip_block *ip_block)
2137 {
2138 int ret;
2139 struct amdgpu_device *adev = ip_block->adev;
2140 struct smu_context *smu = adev->powerplay.pp_handle;
2141
2142 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2143 return 0;
2144
2145 if (!smu->pm_enabled)
2146 return 0;
2147
2148 dev_info(adev->dev, "SMU is resuming...\n");
2149
2150 ret = smu_start_smc_engine(smu);
2151 if (ret) {
2152 dev_err(adev->dev, "SMC engine is not correctly up!\n");
2153 return ret;
2154 }
2155
2156 ret = smu_smc_hw_setup(smu);
2157 if (ret) {
2158 dev_err(adev->dev, "Failed to setup smc hw!\n");
2159 return ret;
2160 }
2161
2162 ret = smu_set_gfx_imu_enable(smu);
2163 if (ret)
2164 return ret;
2165
2166 smu_set_gfx_cgpg(smu, true);
2167
2168 smu->disable_uclk_switch = 0;
2169
2170 adev->pm.dpm_enabled = true;
2171
2172 dev_info(adev->dev, "SMU is resumed successfully!\n");
2173
2174 return 0;
2175 }
2176
2177 static int smu_display_configuration_change(void *handle,
2178 const struct amd_pp_display_configuration *display_config)
2179 {
2180 struct smu_context *smu = handle;
2181
2182 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2183 return -EOPNOTSUPP;
2184
2185 if (!display_config)
2186 return -EINVAL;
2187
2188 smu_set_min_dcef_deep_sleep(smu,
2189 display_config->min_dcef_deep_sleep_set_clk / 100);
2190
2191 return 0;
2192 }
2193
2194 static int smu_set_clockgating_state(void *handle,
2195 enum amd_clockgating_state state)
2196 {
2197 return 0;
2198 }
2199
2200 static int smu_set_powergating_state(void *handle,
2201 enum amd_powergating_state state)
2202 {
2203 return 0;
2204 }
2205
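/*
 * Track entering/leaving the UMD stable pstate: on entry save the current
 * level and disable GPO, gfx ULV and deep sleep; on exit restore the saved
 * level and re-enable them.
 */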
2206 static int smu_enable_umd_pstate(void *handle,
2207 enum amd_dpm_forced_level *level)
2208 {
2209 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2210 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2211 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2212 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2213
2214 struct smu_context *smu = (struct smu_context*)(handle);
2215 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2216
2217 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2218 return -EINVAL;
2219
2220 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2221 /* enter umd pstate, save current level, disable gfx cg */
2222 if (*level & profile_mode_mask) {
2223 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2224 smu_gpo_control(smu, false);
2225 smu_gfx_ulv_control(smu, false);
2226 smu_deep_sleep_control(smu, false);
2227 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2228 }
2229 } else {
2230 /* exit umd pstate, restore level, enable gfx cg */
2231 if (!(*level & profile_mode_mask)) {
2232 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2233 *level = smu_dpm_ctx->saved_dpm_level;
2234 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2235 smu_deep_sleep_control(smu, true);
2236 smu_gfx_ulv_control(smu, true);
2237 smu_gpo_control(smu, true);
2238 }
2239 }
2240
2241 return 0;
2242 }
2243
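/*
 * Recompute the workload mask from the per-profile refcounts and push it
 * to the PPT backend only when it actually changed.
 */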
2244 static int smu_bump_power_profile_mode(struct smu_context *smu,
2245 long *custom_params,
2246 u32 custom_params_max_idx)
2247 {
2248 u32 workload_mask = 0;
2249 int i, ret = 0;
2250
2251 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2252 if (smu->workload_refcount[i])
2253 workload_mask |= 1 << i;
2254 }
2255
2256 if (smu->workload_mask == workload_mask)
2257 return 0;
2258
2259 if (smu->ppt_funcs->set_power_profile_mode)
2260 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2261 custom_params,
2262 custom_params_max_idx);
2263
2264 if (!ret)
2265 smu->workload_mask = workload_mask;
2266
2267 return ret;
2268 }
2269
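/* Refcount helpers for the per-profile workload requests. */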
2270 static void smu_power_profile_mode_get(struct smu_context *smu,
2271 enum PP_SMC_POWER_PROFILE profile_mode)
2272 {
2273 smu->workload_refcount[profile_mode]++;
2274 }
2275
2276 static void smu_power_profile_mode_put(struct smu_context *smu,
2277 enum PP_SMC_POWER_PROFILE profile_mode)
2278 {
2279 if (smu->workload_refcount[profile_mode])
2280 smu->workload_refcount[profile_mode]--;
2281 }
2282
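/*
 * Apply the current power state: propagate display config changes, apply
 * the clock adjust rules, switch the performance level if it changed and
 * finally re-evaluate the power profile mode (except in manual and
 * perf-determinism modes).
 */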
2283 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2284 enum amd_dpm_forced_level level,
2285 bool skip_display_settings)
2286 {
2287 int ret = 0;
2288 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2289
2290 if (!skip_display_settings) {
2291 ret = smu_display_config_changed(smu);
2292 if (ret) {
2293 dev_err(smu->adev->dev, "Failed to change display config!");
2294 return ret;
2295 }
2296 }
2297
2298 ret = smu_apply_clocks_adjust_rules(smu);
2299 if (ret) {
2300 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2301 return ret;
2302 }
2303
2304 if (!skip_display_settings) {
2305 ret = smu_notify_smc_display_config(smu);
2306 if (ret) {
2307 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2308 return ret;
2309 }
2310 }
2311
2312 if (smu_dpm_ctx->dpm_level != level) {
2313 ret = smu_asic_set_performance_level(smu, level);
2314 if (ret) {
2315 dev_err(smu->adev->dev, "Failed to set performance level!");
2316 return ret;
2317 }
2318
2319 /* update the saved copy */
2320 smu_dpm_ctx->dpm_level = level;
2321 }
2322
2323 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2324 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2325 smu_bump_power_profile_mode(smu, NULL, 0);
2326
2327 return ret;
2328 }
2329
2330 static int smu_handle_task(struct smu_context *smu,
2331 enum amd_dpm_forced_level level,
2332 enum amd_pp_task task_id)
2333 {
2334 int ret = 0;
2335
2336 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2337 return -EOPNOTSUPP;
2338
2339 switch (task_id) {
2340 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2341 ret = smu_pre_display_config_changed(smu);
2342 if (ret)
2343 return ret;
2344 ret = smu_adjust_power_state_dynamic(smu, level, false);
2345 break;
2346 case AMD_PP_TASK_COMPLETE_INIT:
2347 ret = smu_adjust_power_state_dynamic(smu, level, true);
2348 break;
2349 case AMD_PP_TASK_READJUST_POWER_STATE:
2350 ret = smu_adjust_power_state_dynamic(smu, level, true);
2351 break;
2352 default:
2353 break;
2354 }
2355
2356 return ret;
2357 }
2358
2359 static int smu_handle_dpm_task(void *handle,
2360 enum amd_pp_task task_id,
2361 enum amd_pm_state_type *user_state)
2362 {
2363 struct smu_context *smu = handle;
2364 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2365
2366 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2367
2368 }
2369
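/*
 * Get/put a workload profile reference on behalf of a client and
 * re-evaluate the resulting mask; roll the refcount back if applying
 * the new mask fails.
 */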
2370 static int smu_switch_power_profile(void *handle,
2371 enum PP_SMC_POWER_PROFILE type,
2372 bool enable)
2373 {
2374 struct smu_context *smu = handle;
2375 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2376 int ret;
2377
2378 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2379 return -EOPNOTSUPP;
2380
2381 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
2382 return -EINVAL;
2383
2384 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2385 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2386 if (enable)
2387 smu_power_profile_mode_get(smu, type);
2388 else
2389 smu_power_profile_mode_put(smu, type);
2390 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2391 if (ret) {
2392 if (enable)
2393 smu_power_profile_mode_put(smu, type);
2394 else
2395 smu_power_profile_mode_get(smu, type);
2396 return ret;
2397 }
2398 }
2399
2400 return 0;
2401 }
2402
2403 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2404 {
2405 struct smu_context *smu = handle;
2406 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2407
2408 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2409 return -EOPNOTSUPP;
2410
2411 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2412 return -EINVAL;
2413
2414 return smu_dpm_ctx->dpm_level;
2415 }
2416
2417 static int smu_force_performance_level(void *handle,
2418 enum amd_dpm_forced_level level)
2419 {
2420 struct smu_context *smu = handle;
2421 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2422 int ret = 0;
2423
2424 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2425 return -EOPNOTSUPP;
2426
2427 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2428 return -EINVAL;
2429
2430 ret = smu_enable_umd_pstate(smu, &level);
2431 if (ret)
2432 return ret;
2433
2434 ret = smu_handle_task(smu, level,
2435 AMD_PP_TASK_READJUST_POWER_STATE);
2436
2437 /* reset user dpm clock state */
2438 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2439 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2440 smu->user_dpm_profile.clk_dependency = 0;
2441 }
2442
2443 return ret;
2444 }
2445
2446 static int smu_set_display_count(void *handle, uint32_t count)
2447 {
2448 struct smu_context *smu = handle;
2449
2450 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2451 return -EOPNOTSUPP;
2452
2453 return smu_init_display_count(smu, count);
2454 }
2455
2456 static int smu_force_smuclk_levels(struct smu_context *smu,
2457 enum smu_clk_type clk_type,
2458 uint32_t mask)
2459 {
2460 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2461 int ret = 0;
2462
2463 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2464 return -EOPNOTSUPP;
2465
2466 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2467 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2468 return -EINVAL;
2469 }
2470
2471 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2472 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2473 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2474 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2475 smu_set_user_clk_dependencies(smu, clk_type);
2476 }
2477 }
2478
2479 return ret;
2480 }
2481
2482 static int smu_force_ppclk_levels(void *handle,
2483 enum pp_clock_type type,
2484 uint32_t mask)
2485 {
2486 struct smu_context *smu = handle;
2487 enum smu_clk_type clk_type;
2488
2489 switch (type) {
2490 case PP_SCLK:
2491 clk_type = SMU_SCLK; break;
2492 case PP_MCLK:
2493 clk_type = SMU_MCLK; break;
2494 case PP_PCIE:
2495 clk_type = SMU_PCIE; break;
2496 case PP_SOCCLK:
2497 clk_type = SMU_SOCCLK; break;
2498 case PP_FCLK:
2499 clk_type = SMU_FCLK; break;
2500 case PP_DCEFCLK:
2501 clk_type = SMU_DCEFCLK; break;
2502 case PP_VCLK:
2503 clk_type = SMU_VCLK; break;
2504 case PP_VCLK1:
2505 clk_type = SMU_VCLK1; break;
2506 case PP_DCLK:
2507 clk_type = SMU_DCLK; break;
2508 case PP_DCLK1:
2509 clk_type = SMU_DCLK1; break;
2510 case OD_SCLK:
2511 clk_type = SMU_OD_SCLK; break;
2512 case OD_MCLK:
2513 clk_type = SMU_OD_MCLK; break;
2514 case OD_VDDC_CURVE:
2515 clk_type = SMU_OD_VDDC_CURVE; break;
2516 case OD_RANGE:
2517 clk_type = SMU_OD_RANGE; break;
2518 default:
2519 return -EINVAL;
2520 }
2521
2522 return smu_force_smuclk_levels(smu, clk_type, mask);
2523 }
2524
2525 /*
2526 * When the system is suspending or resetting, the dpm_enabled
2527 * flag will be cleared, so that those SMU services which
2528 * are not supported will be gated.
2529 * However, setting the mp1 state should still be allowed
2530 * even if dpm_enabled is cleared.
2531 */
2532 static int smu_set_mp1_state(void *handle,
2533 enum pp_mp1_state mp1_state)
2534 {
2535 struct smu_context *smu = handle;
2536 int ret = 0;
2537
2538 if (!smu->pm_enabled)
2539 return -EOPNOTSUPP;
2540
2541 if (smu->ppt_funcs &&
2542 smu->ppt_funcs->set_mp1_state)
2543 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2544
2545 return ret;
2546 }
2547
2548 static int smu_set_df_cstate(void *handle,
2549 enum pp_df_cstate state)
2550 {
2551 struct smu_context *smu = handle;
2552 int ret = 0;
2553
2554 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2555 return -EOPNOTSUPP;
2556
2557 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2558 return 0;
2559
2560 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2561 if (ret)
2562 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2563
2564 return ret;
2565 }
2566
2567 int smu_write_watermarks_table(struct smu_context *smu)
2568 {
2569 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2570 return -EOPNOTSUPP;
2571
2572 return smu_set_watermarks_table(smu, NULL);
2573 }
2574
2575 static int smu_set_watermarks_for_clock_ranges(void *handle,
2576 struct pp_smu_wm_range_sets *clock_ranges)
2577 {
2578 struct smu_context *smu = handle;
2579
2580 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2581 return -EOPNOTSUPP;
2582
2583 if (smu->disable_watermark)
2584 return 0;
2585
2586 return smu_set_watermarks_table(smu, clock_ranges);
2587 }
2588
2589 int smu_set_ac_dc(struct smu_context *smu)
2590 {
2591 int ret = 0;
2592
2593 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2594 return -EOPNOTSUPP;
2595
2596 /* controlled by firmware */
2597 if (smu->dc_controlled_by_gpio)
2598 return 0;
2599
2600 ret = smu_set_power_source(smu,
2601 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2602 SMU_POWER_SOURCE_DC);
2603 if (ret)
2604 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2605 smu->adev->pm.ac_power ? "AC" : "DC");
2606
2607 return ret;
2608 }
2609
2610 const struct amd_ip_funcs smu_ip_funcs = {
2611 .name = "smu",
2612 .early_init = smu_early_init,
2613 .late_init = smu_late_init,
2614 .sw_init = smu_sw_init,
2615 .sw_fini = smu_sw_fini,
2616 .hw_init = smu_hw_init,
2617 .hw_fini = smu_hw_fini,
2618 .late_fini = smu_late_fini,
2619 .suspend = smu_suspend,
2620 .resume = smu_resume,
2621 .is_idle = NULL,
2622 .check_soft_reset = NULL,
2623 .wait_for_idle = NULL,
2624 .soft_reset = NULL,
2625 .set_clockgating_state = smu_set_clockgating_state,
2626 .set_powergating_state = smu_set_powergating_state,
2627 };
2628
2629 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2630 .type = AMD_IP_BLOCK_TYPE_SMC,
2631 .major = 11,
2632 .minor = 0,
2633 .rev = 0,
2634 .funcs = &smu_ip_funcs,
2635 };
2636
2637 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2638 .type = AMD_IP_BLOCK_TYPE_SMC,
2639 .major = 12,
2640 .minor = 0,
2641 .rev = 0,
2642 .funcs = &smu_ip_funcs,
2643 };
2644
2645 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2646 .type = AMD_IP_BLOCK_TYPE_SMC,
2647 .major = 13,
2648 .minor = 0,
2649 .rev = 0,
2650 .funcs = &smu_ip_funcs,
2651 };
2652
2653 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2654 .type = AMD_IP_BLOCK_TYPE_SMC,
2655 .major = 14,
2656 .minor = 0,
2657 .rev = 0,
2658 .funcs = &smu_ip_funcs,
2659 };
2660
2661 static int smu_load_microcode(void *handle)
2662 {
2663 struct smu_context *smu = handle;
2664 struct amdgpu_device *adev = smu->adev;
2665 int ret = 0;
2666
2667 if (!smu->pm_enabled)
2668 return -EOPNOTSUPP;
2669
2670 /* This should be used for non-PSP loading */
2671 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2672 return 0;
2673
2674 if (smu->ppt_funcs->load_microcode) {
2675 ret = smu->ppt_funcs->load_microcode(smu);
2676 if (ret) {
2677 dev_err(adev->dev, "Load microcode failed\n");
2678 return ret;
2679 }
2680 }
2681
2682 if (smu->ppt_funcs->check_fw_status) {
2683 ret = smu->ppt_funcs->check_fw_status(smu);
2684 if (ret) {
2685 dev_err(adev->dev, "SMC is not ready\n");
2686 return ret;
2687 }
2688 }
2689
2690 return ret;
2691 }
2692
2693 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2694 {
2695 int ret = 0;
2696
2697 if (smu->ppt_funcs->set_gfx_cgpg)
2698 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2699
2700 return ret;
2701 }
2702
2703 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2704 {
2705 struct smu_context *smu = handle;
2706 int ret = 0;
2707
2708 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2709 return -EOPNOTSUPP;
2710
2711 if (!smu->ppt_funcs->set_fan_speed_rpm)
2712 return -EOPNOTSUPP;
2713
2714 if (speed == U32_MAX)
2715 return -EINVAL;
2716
2717 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2718 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2719 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2720 smu->user_dpm_profile.fan_speed_rpm = speed;
2721
2722 /* Override the custom PWM setting as the two cannot coexist */
2723 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2724 smu->user_dpm_profile.fan_speed_pwm = 0;
2725 }
2726
2727 return ret;
2728 }
2729
2730 /**
2731 * smu_get_power_limit - Request one of the SMU Power Limits
2732 *
2733 * @handle: pointer to smu context
2734 * @limit: requested limit is written back to this variable
2735 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
2736 * @pp_power_type: &pp_power_type the type of power to query
2737 * Return: 0 on success, <0 on error
2738 *
2739 */
2740 int smu_get_power_limit(void *handle,
2741 uint32_t *limit,
2742 enum pp_power_limit_level pp_limit_level,
2743 enum pp_power_type pp_power_type)
2744 {
2745 struct smu_context *smu = handle;
2746 struct amdgpu_device *adev = smu->adev;
2747 enum smu_ppt_limit_level limit_level;
2748 uint32_t limit_type;
2749 int ret = 0;
2750
2751 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2752 return -EOPNOTSUPP;
2753
2754 switch (pp_power_type) {
2755 case PP_PWR_TYPE_SUSTAINED:
2756 limit_type = SMU_DEFAULT_PPT_LIMIT;
2757 break;
2758 case PP_PWR_TYPE_FAST:
2759 limit_type = SMU_FAST_PPT_LIMIT;
2760 break;
2761 default:
2762 return -EOPNOTSUPP;
2763 }
2764
2765 switch (pp_limit_level) {
2766 case PP_PWR_LIMIT_CURRENT:
2767 limit_level = SMU_PPT_LIMIT_CURRENT;
2768 break;
2769 case PP_PWR_LIMIT_DEFAULT:
2770 limit_level = SMU_PPT_LIMIT_DEFAULT;
2771 break;
2772 case PP_PWR_LIMIT_MAX:
2773 limit_level = SMU_PPT_LIMIT_MAX;
2774 break;
2775 case PP_PWR_LIMIT_MIN:
2776 limit_level = SMU_PPT_LIMIT_MIN;
2777 break;
2778 default:
2779 return -EOPNOTSUPP;
2780 }
2781
2782 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2783 if (smu->ppt_funcs->get_ppt_limit)
2784 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2785 } else {
2786 switch (limit_level) {
2787 case SMU_PPT_LIMIT_CURRENT:
2788 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2789 case IP_VERSION(13, 0, 2):
2790 case IP_VERSION(13, 0, 6):
2791 case IP_VERSION(13, 0, 14):
2792 case IP_VERSION(11, 0, 7):
2793 case IP_VERSION(11, 0, 11):
2794 case IP_VERSION(11, 0, 12):
2795 case IP_VERSION(11, 0, 13):
2796 ret = smu_get_asic_power_limits(smu,
2797 &smu->current_power_limit,
2798 NULL, NULL, NULL);
2799 break;
2800 default:
2801 break;
2802 }
2803 *limit = smu->current_power_limit;
2804 break;
2805 case SMU_PPT_LIMIT_DEFAULT:
2806 *limit = smu->default_power_limit;
2807 break;
2808 case SMU_PPT_LIMIT_MAX:
2809 *limit = smu->max_power_limit;
2810 break;
2811 case SMU_PPT_LIMIT_MIN:
2812 *limit = smu->min_power_limit;
2813 break;
2814 default:
2815 return -EINVAL;
2816 }
2817 }
2818
2819 return ret;
2820 }
2821
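/*
 * The limit argument packs the PPT limit type in bits 31:24 and the
 * requested value in bits 23:0; for the default limit type, a value of
 * 0 re-applies the current power limit.
 */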
2822 static int smu_set_power_limit(void *handle, uint32_t limit)
2823 {
2824 struct smu_context *smu = handle;
2825 uint32_t limit_type = limit >> 24;
2826 int ret = 0;
2827
2828 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2829 return -EOPNOTSUPP;
2830
2831 limit &= (1<<24)-1;
2832 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2833 if (smu->ppt_funcs->set_power_limit)
2834 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2835
2836 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2837 dev_err(smu->adev->dev,
2838 "New power limit (%d) is out of range [%d,%d]\n",
2839 limit, smu->min_power_limit, smu->max_power_limit);
2840 return -EINVAL;
2841 }
2842
2843 if (!limit)
2844 limit = smu->current_power_limit;
2845
2846 if (smu->ppt_funcs->set_power_limit) {
2847 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2848 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2849 smu->user_dpm_profile.power_limit = limit;
2850 }
2851
2852 return ret;
2853 }
2854
2855 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2856 {
2857 int ret = 0;
2858
2859 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2860 return -EOPNOTSUPP;
2861
2862 if (smu->ppt_funcs->print_clk_levels)
2863 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2864
2865 return ret;
2866 }
2867
2868 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2869 {
2870 enum smu_clk_type clk_type;
2871
2872 switch (type) {
2873 case PP_SCLK:
2874 clk_type = SMU_SCLK; break;
2875 case PP_MCLK:
2876 clk_type = SMU_MCLK; break;
2877 case PP_PCIE:
2878 clk_type = SMU_PCIE; break;
2879 case PP_SOCCLK:
2880 clk_type = SMU_SOCCLK; break;
2881 case PP_FCLK:
2882 clk_type = SMU_FCLK; break;
2883 case PP_DCEFCLK:
2884 clk_type = SMU_DCEFCLK; break;
2885 case PP_VCLK:
2886 clk_type = SMU_VCLK; break;
2887 case PP_VCLK1:
2888 clk_type = SMU_VCLK1; break;
2889 case PP_DCLK:
2890 clk_type = SMU_DCLK; break;
2891 case PP_DCLK1:
2892 clk_type = SMU_DCLK1; break;
2893 case OD_SCLK:
2894 clk_type = SMU_OD_SCLK; break;
2895 case OD_MCLK:
2896 clk_type = SMU_OD_MCLK; break;
2897 case OD_VDDC_CURVE:
2898 clk_type = SMU_OD_VDDC_CURVE; break;
2899 case OD_RANGE:
2900 clk_type = SMU_OD_RANGE; break;
2901 case OD_VDDGFX_OFFSET:
2902 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2903 case OD_CCLK:
2904 clk_type = SMU_OD_CCLK; break;
2905 case OD_FAN_CURVE:
2906 clk_type = SMU_OD_FAN_CURVE; break;
2907 case OD_ACOUSTIC_LIMIT:
2908 clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2909 case OD_ACOUSTIC_TARGET:
2910 clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2911 case OD_FAN_TARGET_TEMPERATURE:
2912 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2913 case OD_FAN_MINIMUM_PWM:
2914 clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2915 case OD_FAN_ZERO_RPM_ENABLE:
2916 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
2917 case OD_FAN_ZERO_RPM_STOP_TEMP:
2918 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
2919 default:
2920 clk_type = SMU_CLK_COUNT; break;
2921 }
2922
2923 return clk_type;
2924 }
2925
2926 static int smu_print_ppclk_levels(void *handle,
2927 enum pp_clock_type type,
2928 char *buf)
2929 {
2930 struct smu_context *smu = handle;
2931 enum smu_clk_type clk_type;
2932
2933 clk_type = smu_convert_to_smuclk(type);
2934 if (clk_type == SMU_CLK_COUNT)
2935 return -EINVAL;
2936
2937 return smu_print_smuclk_levels(smu, clk_type, buf);
2938 }
2939
2940 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2941 {
2942 struct smu_context *smu = handle;
2943 enum smu_clk_type clk_type;
2944
2945 clk_type = smu_convert_to_smuclk(type);
2946 if (clk_type == SMU_CLK_COUNT)
2947 return -EINVAL;
2948
2949 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2950 return -EOPNOTSUPP;
2951
2952 if (!smu->ppt_funcs->emit_clk_levels)
2953 return -ENOENT;
2954
2955 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2956
2957 }
2958
2959 static int smu_od_edit_dpm_table(void *handle,
2960 enum PP_OD_DPM_TABLE_COMMAND type,
2961 long *input, uint32_t size)
2962 {
2963 struct smu_context *smu = handle;
2964 int ret = 0;
2965
2966 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2967 return -EOPNOTSUPP;
2968
2969 if (smu->ppt_funcs->od_edit_dpm_table) {
2970 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2971 }
2972
2973 return ret;
2974 }
2975
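/*
 * Read a sensor value: try the ASIC-specific read_sensor callback first
 * and fall back to the generic handling for the stable/peak pstate
 * clocks, the enabled feature mask and the UVD/VCE/VCN power state
 * sensors.
 */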
2976 static int smu_read_sensor(void *handle,
2977 int sensor,
2978 void *data,
2979 int *size_arg)
2980 {
2981 struct smu_context *smu = handle;
2982 struct smu_umd_pstate_table *pstate_table =
2983 &smu->pstate_table;
2984 int ret = 0;
2985 uint32_t *size, size_val;
2986
2987 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2988 return -EOPNOTSUPP;
2989
2990 if (!data || !size_arg)
2991 return -EINVAL;
2992
2993 size_val = *size_arg;
2994 size = &size_val;
2995
2996 if (smu->ppt_funcs->read_sensor)
2997 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2998 goto unlock;
2999
3000 switch (sensor) {
3001 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3002 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3003 *size = 4;
3004 break;
3005 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3006 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3007 *size = 4;
3008 break;
3009 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3010 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3011 *size = 4;
3012 break;
3013 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3014 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3015 *size = 4;
3016 break;
3017 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3018 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3019 *size = 8;
3020 break;
3021 case AMDGPU_PP_SENSOR_UVD_POWER:
3022 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3023 *size = 4;
3024 break;
3025 case AMDGPU_PP_SENSOR_VCE_POWER:
3026 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3027 *size = 4;
3028 break;
3029 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3030 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
3031 *size = 4;
3032 break;
3033 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3034 *(uint32_t *)data = 0;
3035 *size = 4;
3036 break;
3037 default:
3038 *size = 0;
3039 ret = -EOPNOTSUPP;
3040 break;
3041 }
3042
3043 unlock:
3044 /* write the (possibly updated) uint32_t size back to the int out-parameter */
3045 *size_arg = size_val;
3046
3047 return ret;
3048 }
3049
3050 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
3051 {
3052 int ret = -EOPNOTSUPP;
3053 struct smu_context *smu = handle;
3054
3055 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3056 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3057
3058 return ret;
3059 }
3060
3061 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3062 {
3063 int ret = -EOPNOTSUPP;
3064 struct smu_context *smu = handle;
3065
3066 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3067 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3068
3069 return ret;
3070 }
3071
3072 static int smu_get_power_profile_mode(void *handle, char *buf)
3073 {
3074 struct smu_context *smu = handle;
3075
3076 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3077 !smu->ppt_funcs->get_power_profile_mode)
3078 return -EOPNOTSUPP;
3079 if (!buf)
3080 return -EINVAL;
3081
3082 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3083 }
3084
3085 static int smu_set_power_profile_mode(void *handle,
3086 long *param,
3087 uint32_t param_size)
3088 {
3089 struct smu_context *smu = handle;
3090 bool custom = false;
3091 int ret = 0;
3092
3093 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3094 !smu->ppt_funcs->set_power_profile_mode)
3095 return -EOPNOTSUPP;
3096
3097 if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
3098 custom = true;
3099 /* clear frontend mask so custom changes propagate */
3100 smu->workload_mask = 0;
3101 }
3102
3103 if ((param[param_size] != smu->power_profile_mode) || custom) {
3104 /* clear the old user preference */
3105 smu_power_profile_mode_put(smu, smu->power_profile_mode);
3106 /* set the new user preference */
3107 smu_power_profile_mode_get(smu, param[param_size]);
3108 ret = smu_bump_power_profile_mode(smu,
3109 custom ? param : NULL,
3110 custom ? param_size : 0);
3111 if (ret)
3112 smu_power_profile_mode_put(smu, param[param_size]);
3113 else
3114 /* store the user's preference */
3115 smu->power_profile_mode = param[param_size];
3116 }
3117
3118 return ret;
3119 }
3120
3121 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3122 {
3123 struct smu_context *smu = handle;
3124
3125 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3126 return -EOPNOTSUPP;
3127
3128 if (!smu->ppt_funcs->get_fan_control_mode)
3129 return -EOPNOTSUPP;
3130
3131 if (!fan_mode)
3132 return -EINVAL;
3133
3134 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3135
3136 return 0;
3137 }
3138
3139 static int smu_set_fan_control_mode(void *handle, u32 value)
3140 {
3141 struct smu_context *smu = handle;
3142 int ret = 0;
3143
3144 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3145 return -EOPNOTSUPP;
3146
3147 if (!smu->ppt_funcs->set_fan_control_mode)
3148 return -EOPNOTSUPP;
3149
3150 if (value == U32_MAX)
3151 return -EINVAL;
3152
3153 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3154 if (ret)
3155 goto out;
3156
3157 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3158 smu->user_dpm_profile.fan_mode = value;
3159
3160 /* reset user dpm fan speed */
3161 if (value != AMD_FAN_CTRL_MANUAL) {
3162 smu->user_dpm_profile.fan_speed_pwm = 0;
3163 smu->user_dpm_profile.fan_speed_rpm = 0;
3164 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3165 }
3166 }
3167
3168 out:
3169 return ret;
3170 }
3171
3172 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3173 {
3174 struct smu_context *smu = handle;
3175 int ret = 0;
3176
3177 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3178 return -EOPNOTSUPP;
3179
3180 if (!smu->ppt_funcs->get_fan_speed_pwm)
3181 return -EOPNOTSUPP;
3182
3183 if (!speed)
3184 return -EINVAL;
3185
3186 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3187
3188 return ret;
3189 }
3190
3191 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3192 {
3193 struct smu_context *smu = handle;
3194 int ret = 0;
3195
3196 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3197 return -EOPNOTSUPP;
3198
3199 if (!smu->ppt_funcs->set_fan_speed_pwm)
3200 return -EOPNOTSUPP;
3201
3202 if (speed == U32_MAX)
3203 return -EINVAL;
3204
3205 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3206 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3207 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3208 smu->user_dpm_profile.fan_speed_pwm = speed;
3209
3210 /* Override the custom RPM setting as the two cannot coexist */
3211 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3212 smu->user_dpm_profile.fan_speed_rpm = 0;
3213 }
3214
3215 return ret;
3216 }
3217
3218 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3219 {
3220 struct smu_context *smu = handle;
3221 int ret = 0;
3222
3223 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3224 return -EOPNOTSUPP;
3225
3226 if (!smu->ppt_funcs->get_fan_speed_rpm)
3227 return -EOPNOTSUPP;
3228
3229 if (!speed)
3230 return -EINVAL;
3231
3232 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3233
3234 return ret;
3235 }
3236
3237 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3238 {
3239 struct smu_context *smu = handle;
3240
3241 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3242 return -EOPNOTSUPP;
3243
3244 return smu_set_min_dcef_deep_sleep(smu, clk);
3245 }
3246
3247 static int smu_get_clock_by_type_with_latency(void *handle,
3248 enum amd_pp_clock_type type,
3249 struct pp_clock_levels_with_latency *clocks)
3250 {
3251 struct smu_context *smu = handle;
3252 enum smu_clk_type clk_type;
3253 int ret = 0;
3254
3255 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3256 return -EOPNOTSUPP;
3257
3258 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3259 switch (type) {
3260 case amd_pp_sys_clock:
3261 clk_type = SMU_GFXCLK;
3262 break;
3263 case amd_pp_mem_clock:
3264 clk_type = SMU_MCLK;
3265 break;
3266 case amd_pp_dcef_clock:
3267 clk_type = SMU_DCEFCLK;
3268 break;
3269 case amd_pp_disp_clock:
3270 clk_type = SMU_DISPCLK;
3271 break;
3272 default:
3273 dev_err(smu->adev->dev, "Invalid clock type!\n");
3274 return -EINVAL;
3275 }
3276
3277 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3278 }
3279
3280 return ret;
3281 }
3282
3283 static int smu_display_clock_voltage_request(void *handle,
3284 struct pp_display_clock_request *clock_req)
3285 {
3286 struct smu_context *smu = handle;
3287 int ret = 0;
3288
3289 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3290 return -EOPNOTSUPP;
3291
3292 if (smu->ppt_funcs->display_clock_voltage_request)
3293 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3294
3295 return ret;
3296 }
3297
3298
3299 static int smu_display_disable_memory_clock_switch(void *handle,
3300 bool disable_memory_clock_switch)
3301 {
3302 struct smu_context *smu = handle;
3303 int ret = -EINVAL;
3304
3305 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3306 return -EOPNOTSUPP;
3307
3308 if (smu->ppt_funcs->display_disable_memory_clock_switch)
3309 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3310
3311 return ret;
3312 }
3313
3314 static int smu_set_xgmi_pstate(void *handle,
3315 uint32_t pstate)
3316 {
3317 struct smu_context *smu = handle;
3318 int ret = 0;
3319
3320 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3321 return -EOPNOTSUPP;
3322
3323 if (smu->ppt_funcs->set_xgmi_pstate)
3324 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3325
3326 if (ret)
3327 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3328
3329 return ret;
3330 }
3331
3332 static int smu_get_baco_capability(void *handle)
3333 {
3334 struct smu_context *smu = handle;
3335
3336 if (!smu->pm_enabled)
3337 return false;
3338
3339 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3340 return false;
3341
3342 return smu->ppt_funcs->get_bamaco_support(smu);
3343 }
3344
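/* state == 1 requests BACO entry, state == 0 requests BACO exit. */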
3345 static int smu_baco_set_state(void *handle, int state)
3346 {
3347 struct smu_context *smu = handle;
3348 int ret = 0;
3349
3350 if (!smu->pm_enabled)
3351 return -EOPNOTSUPP;
3352
3353 if (state == 0) {
3354 if (smu->ppt_funcs->baco_exit)
3355 ret = smu->ppt_funcs->baco_exit(smu);
3356 } else if (state == 1) {
3357 if (smu->ppt_funcs->baco_enter)
3358 ret = smu->ppt_funcs->baco_enter(smu);
3359 } else {
3360 return -EINVAL;
3361 }
3362
3363 if (ret)
3364 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3365 state ? "enter" : "exit");
3366
3367 return ret;
3368 }
3369
3370 bool smu_mode1_reset_is_support(struct smu_context *smu)
3371 {
3372 bool ret = false;
3373
3374 if (!smu->pm_enabled)
3375 return false;
3376
3377 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3378 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3379
3380 return ret;
3381 }
3382
3383 bool smu_mode2_reset_is_support(struct smu_context *smu)
3384 {
3385 bool ret = false;
3386
3387 if (!smu->pm_enabled)
3388 return false;
3389
3390 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
3391 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
3392
3393 return ret;
3394 }
3395
3396 int smu_mode1_reset(struct smu_context *smu)
3397 {
3398 int ret = 0;
3399
3400 if (!smu->pm_enabled)
3401 return -EOPNOTSUPP;
3402
3403 if (smu->ppt_funcs->mode1_reset)
3404 ret = smu->ppt_funcs->mode1_reset(smu);
3405
3406 return ret;
3407 }
3408
3409 static int smu_mode2_reset(void *handle)
3410 {
3411 struct smu_context *smu = handle;
3412 int ret = 0;
3413
3414 if (!smu->pm_enabled)
3415 return -EOPNOTSUPP;
3416
3417 if (smu->ppt_funcs->mode2_reset)
3418 ret = smu->ppt_funcs->mode2_reset(smu);
3419
3420 if (ret)
3421 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3422
3423 return ret;
3424 }
3425
3426 static int smu_enable_gfx_features(void *handle)
3427 {
3428 struct smu_context *smu = handle;
3429 int ret = 0;
3430
3431 if (!smu->pm_enabled)
3432 return -EOPNOTSUPP;
3433
3434 if (smu->ppt_funcs->enable_gfx_features)
3435 ret = smu->ppt_funcs->enable_gfx_features(smu);
3436
3437 if (ret)
3438 dev_err(smu->adev->dev, "enable gfx features failed!\n");
3439
3440 return ret;
3441 }
3442
3443 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3444 struct pp_smu_nv_clock_table *max_clocks)
3445 {
3446 struct smu_context *smu = handle;
3447 int ret = 0;
3448
3449 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3450 return -EOPNOTSUPP;
3451
3452 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3453 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3454
3455 return ret;
3456 }
3457
3458 static int smu_get_uclk_dpm_states(void *handle,
3459 unsigned int *clock_values_in_khz,
3460 unsigned int *num_states)
3461 {
3462 struct smu_context *smu = handle;
3463 int ret = 0;
3464
3465 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3466 return -EOPNOTSUPP;
3467
3468 if (smu->ppt_funcs->get_uclk_dpm_states)
3469 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3470
3471 return ret;
3472 }
3473
3474 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3475 {
3476 struct smu_context *smu = handle;
3477 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3478
3479 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3480 return -EOPNOTSUPP;
3481
3482 if (smu->ppt_funcs->get_current_power_state)
3483 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3484
3485 return pm_state;
3486 }
3487
3488 static int smu_get_dpm_clock_table(void *handle,
3489 struct dpm_clocks *clock_table)
3490 {
3491 struct smu_context *smu = handle;
3492 int ret = 0;
3493
3494 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3495 return -EOPNOTSUPP;
3496
3497 if (smu->ppt_funcs->get_dpm_clock_table)
3498 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3499
3500 return ret;
3501 }
3502
3503 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3504 {
3505 struct smu_context *smu = handle;
3506
3507 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3508 return -EOPNOTSUPP;
3509
3510 if (!smu->ppt_funcs->get_gpu_metrics)
3511 return -EOPNOTSUPP;
3512
3513 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3514 }
3515
3516 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3517 size_t size)
3518 {
3519 struct smu_context *smu = handle;
3520
3521 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3522 return -EOPNOTSUPP;
3523
3524 if (!smu->ppt_funcs->get_pm_metrics)
3525 return -EOPNOTSUPP;
3526
3527 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3528 }
3529
3530 static int smu_enable_mgpu_fan_boost(void *handle)
3531 {
3532 struct smu_context *smu = handle;
3533 int ret = 0;
3534
3535 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3536 return -EOPNOTSUPP;
3537
3538 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3539 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3540
3541 return ret;
3542 }
3543
3544 static int smu_gfx_state_change_set(void *handle,
3545 uint32_t state)
3546 {
3547 struct smu_context *smu = handle;
3548 int ret = 0;
3549
3550 if (smu->ppt_funcs->gfx_state_change_set)
3551 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3552
3553 return ret;
3554 }
3555
3556 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3557 {
3558 int ret = 0;
3559
3560 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3561 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3562
3563 return ret;
3564 }
3565
3566 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3567 {
3568 int ret = -EOPNOTSUPP;
3569
3570 if (smu->ppt_funcs &&
3571 smu->ppt_funcs->get_ecc_info)
3572 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3573
3574 return ret;
3575
3576 }
3577
3578 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3579 {
3580 struct smu_context *smu = handle;
3581 struct smu_table_context *smu_table = &smu->smu_table;
3582 struct smu_table *memory_pool = &smu_table->memory_pool;
3583
3584 if (!addr || !size)
3585 return -EINVAL;
3586
3587 *addr = NULL;
3588 *size = 0;
3589 if (memory_pool->bo) {
3590 *addr = memory_pool->cpu_addr;
3591 *size = memory_pool->size;
3592 }
3593
3594 return 0;
3595 }
3596
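/*
 * Emit the supported levels of a DPM policy into the sysfs buffer,
 * marking the currently selected level with a '*'.
 */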
3597 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
3598 size_t *size)
3599 {
3600 size_t offset = *size;
3601 int level;
3602
3603 for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
3604 if (level == policy->current_level)
3605 offset += sysfs_emit_at(sysbuf, offset,
3606 "%d : %s*\n", level,
3607 policy->desc->get_desc(policy, level));
3608 else
3609 offset += sysfs_emit_at(sysbuf, offset,
3610 "%d : %s\n", level,
3611 policy->desc->get_desc(policy, level));
3612 }
3613
3614 *size = offset;
3615 }
3616
3617 ssize_t smu_get_pm_policy_info(struct smu_context *smu,
3618 enum pp_pm_policy p_type, char *sysbuf)
3619 {
3620 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3621 struct smu_dpm_policy_ctxt *policy_ctxt;
3622 struct smu_dpm_policy *dpm_policy;
3623 size_t offset = 0;
3624
3625 policy_ctxt = dpm_ctxt->dpm_policies;
3626 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3627 !policy_ctxt->policy_mask)
3628 return -EOPNOTSUPP;
3629
3630 if (p_type == PP_PM_POLICY_NONE)
3631 return -EINVAL;
3632
3633 dpm_policy = smu_get_pm_policy(smu, p_type);
3634 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
3635 return -ENOENT;
3636
3637 if (!sysbuf)
3638 return -EINVAL;
3639
3640 smu_print_dpm_policy(dpm_policy, sysbuf, &offset);
3641
3642 return offset;
3643 }
3644
3645 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
3646 enum pp_pm_policy p_type)
3647 {
3648 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3649 struct smu_dpm_policy_ctxt *policy_ctxt;
3650 int i;
3651
3652 policy_ctxt = dpm_ctxt->dpm_policies;
3653 if (!policy_ctxt)
3654 return NULL;
3655
3656 for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
3657 if (policy_ctxt->policies[i].policy_type == p_type)
3658 return &policy_ctxt->policies[i];
3659 }
3660
3661 return NULL;
3662 }
3663
3664 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
3665 int level)
3666 {
3667 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3668 struct smu_dpm_policy *dpm_policy = NULL;
3669 struct smu_dpm_policy_ctxt *policy_ctxt;
3670 int ret = -EOPNOTSUPP;
3671
3672 policy_ctxt = dpm_ctxt->dpm_policies;
3673 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3674 !policy_ctxt->policy_mask)
3675 return ret;
3676
3677 if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
3678 return -EINVAL;
3679
3680 dpm_policy = smu_get_pm_policy(smu, p_type);
3681
3682 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
3683 return ret;
3684
3685 if (dpm_policy->current_level == level)
3686 return 0;
3687
3688 ret = dpm_policy->set_policy(smu, level);
3689
3690 if (!ret)
3691 dpm_policy->current_level = level;
3692
3693 return ret;
3694 }
3695
static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

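/*
 * Wait for an SMU event to complete; defers to the ASIC-specific
 * wait_for_event handler and returns -EINVAL if none is provided.
 */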
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

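/*
 * Copy the STB contents into @buf. The caller must pass a buffer of
 * exactly stb_buf_size bytes.
 */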
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the allocated buffer is of the correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to take the smu mutex: the STB is accessed directly through
	 * MMIO rather than through the SMU messaging route (for now at least).
	 * Register access relies on the implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

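/*
 * Snapshot the STB into a freshly allocated buffer; the buffer is stored
 * in filp->private_data and freed in smu_stb_debugfs_release().
 */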
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only a read method but also open and release
 * methods, because .read returns at most PAGE_SIZE of data per call and
 * may therefore be invoked multiple times. We allocate the STB buffer in
 * .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

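/*
 * Register the "amdgpu_smu_stb_dump" debugfs file when STB is supported,
 * i.e. when a non-zero STB buffer size has been reported.
 */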
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

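/* Report the number of bad HBM pages to the SMU, if the ASIC provides a handler. */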
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

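/* Forward the bad HBM channel flag to the SMU, if the ASIC provides a handler. */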
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

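/* Notify the SMU of the RMA reason, if the ASIC provides a handler. */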
int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}
