1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #define SWSMU_CODE_LAYER_L1
24
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_6_ppt.h"
46 #include "smu_v13_0_7_ppt.h"
47 #include "smu_v14_0_0_ppt.h"
48 #include "smu_v14_0_2_ppt.h"
49 #include "amd_pcie.h"
50
51 /*
52 * DO NOT use these for err/warn/info/debug messages.
53 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54 * They are more MGPU friendly.
55 */
56 #undef pr_err
57 #undef pr_warn
58 #undef pr_info
59 #undef pr_debug
60
61 static const struct amd_pm_funcs swsmu_pm_funcs;
62 static int smu_force_smuclk_levels(struct smu_context *smu,
63 enum smu_clk_type clk_type,
64 uint32_t mask);
65 static int smu_handle_task(struct smu_context *smu,
66 enum amd_dpm_forced_level level,
67 enum amd_pp_task task_id);
68 static int smu_reset(struct smu_context *smu);
69 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70 static int smu_set_fan_control_mode(void *handle, u32 value);
71 static int smu_set_power_limit(void *handle, uint32_t limit);
72 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75 static void smu_power_profile_mode_get(struct smu_context *smu,
76 enum PP_SMC_POWER_PROFILE profile_mode);
77 static void smu_power_profile_mode_put(struct smu_context *smu,
78 enum PP_SMC_POWER_PROFILE profile_mode);
79
80 static int smu_sys_get_pp_feature_mask(void *handle,
81 char *buf)
82 {
83 struct smu_context *smu = handle;
84
85 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
86 return -EOPNOTSUPP;
87
88 return smu_get_pp_feature_mask(smu, buf);
89 }
90
91 static int smu_sys_set_pp_feature_mask(void *handle,
92 uint64_t new_mask)
93 {
94 struct smu_context *smu = handle;
95
96 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
97 return -EOPNOTSUPP;
98
99 return smu_set_pp_feature_mask(smu, new_mask);
100 }
101
102 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
103 {
104 if (!smu->ppt_funcs->set_gfx_off_residency)
105 return -EINVAL;
106
107 return smu_set_gfx_off_residency(smu, value);
108 }
109
110 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
111 {
112 if (!smu->ppt_funcs->get_gfx_off_residency)
113 return -EINVAL;
114
115 return smu_get_gfx_off_residency(smu, value);
116 }
117
118 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
119 {
120 if (!smu->ppt_funcs->get_gfx_off_entrycount)
121 return -EINVAL;
122
123 return smu_get_gfx_off_entrycount(smu, value);
124 }
125
126 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
127 {
128 if (!smu->ppt_funcs->get_gfx_off_status)
129 return -EINVAL;
130
131 *value = smu_get_gfx_off_status(smu);
132
133 return 0;
134 }
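/*
 * The four gfxoff helpers above share the same pattern: return -EINVAL
 * when the ppt backend does not implement the corresponding callback,
 * otherwise forward the request to the matching smu_* wrapper.
 */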
135
136 int smu_set_soft_freq_range(struct smu_context *smu,
137 enum smu_clk_type clk_type,
138 uint32_t min,
139 uint32_t max)
140 {
141 int ret = 0;
142
143 if (smu->ppt_funcs->set_soft_freq_limited_range)
144 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
145 clk_type,
146 min,
147 max,
148 false);
149
150 return ret;
151 }
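/*
 * Example usage (illustrative only): a caller could clamp GFXCLK to a
 * fixed window with
 *     smu_set_soft_freq_range(smu, SMU_GFXCLK, 800, 1600);
 * where min/max are in MHz, the unit used by the dpm freq helpers in
 * this file. The call returns 0 without side effects when the backend
 * does not implement set_soft_freq_limited_range.
 */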
152
153 int smu_get_dpm_freq_range(struct smu_context *smu,
154 enum smu_clk_type clk_type,
155 uint32_t *min,
156 uint32_t *max)
157 {
158 int ret = -ENOTSUPP;
159
160 if (!min && !max)
161 return -EINVAL;
162
163 if (smu->ppt_funcs->get_dpm_ultimate_freq)
164 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
165 clk_type,
166 min,
167 max);
168
169 return ret;
170 }
171
172 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
173 {
174 int ret = 0;
175 struct amdgpu_device *adev = smu->adev;
176
177 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
178 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
179 if (ret)
180 dev_err(adev->dev, "Failed to enable gfx imu!\n");
181 }
182 return ret;
183 }
184
185 static u32 smu_get_mclk(void *handle, bool low)
186 {
187 struct smu_context *smu = handle;
188 uint32_t clk_freq;
189 int ret = 0;
190
191 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
192 low ? &clk_freq : NULL,
193 !low ? &clk_freq : NULL);
194 if (ret)
195 return 0;
196 return clk_freq * 100;
197 }
198
199 static u32 smu_get_sclk(void *handle, bool low)
200 {
201 struct smu_context *smu = handle;
202 uint32_t clk_freq;
203 int ret = 0;
204
205 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
206 low ? &clk_freq : NULL,
207 !low ? &clk_freq : NULL);
208 if (ret)
209 return 0;
210 return clk_freq * 100;
211 }
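/*
 * Note: smu_get_dpm_freq_range() reports clocks in MHz; the "* 100"
 * above converts the result to the 10 kHz units expected by the legacy
 * get_sclk/get_mclk amd_pm_funcs callbacks (assumption based on the
 * long-standing powerplay convention).
 */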
212
213 static int smu_set_gfx_imu_enable(struct smu_context *smu)
214 {
215 struct amdgpu_device *adev = smu->adev;
216
217 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
218 return 0;
219
220 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
221 return 0;
222
223 return smu_set_gfx_power_up_by_imu(smu);
224 }
225
226 static bool is_vcn_enabled(struct amdgpu_device *adev)
227 {
228 int i;
229
230 for (i = 0; i < adev->num_ip_blocks; i++) {
231 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
232 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
233 !adev->ip_blocks[i].status.valid)
234 return false;
235 }
236
237 return true;
238 }
239
240 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
241 bool enable,
242 int inst)
243 {
244 struct smu_power_context *smu_power = &smu->smu_power;
245 struct smu_power_gate *power_gate = &smu_power->power_gate;
246 int ret = 0;
247
248 /*
249 * Don't power on VCN/JPEG when they are skipped.
250 */
251 if (!is_vcn_enabled(smu->adev))
252 return 0;
253
254 if (!smu->ppt_funcs->dpm_set_vcn_enable)
255 return 0;
256
257 if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
258 return 0;
259
260 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
261 if (!ret)
262 atomic_set(&power_gate->vcn_gated[inst], !enable);
263
264 return ret;
265 }
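/*
 * The vcn_gated/jpeg_gated atomics track the gated state (1 = gated), so
 * the "gated ^ enable" check above skips requests that would not change
 * the current state: e.g. enable == true while vcn_gated == 0 (already
 * ungated) gives 0 ^ 1 == 1 and returns early.
 */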
266
267 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
268 bool enable)
269 {
270 struct smu_power_context *smu_power = &smu->smu_power;
271 struct smu_power_gate *power_gate = &smu_power->power_gate;
272 int ret = 0;
273
274 if (!is_vcn_enabled(smu->adev))
275 return 0;
276
277 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
278 return 0;
279
280 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
281 return 0;
282
283 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
284 if (!ret)
285 atomic_set(&power_gate->jpeg_gated, !enable);
286
287 return ret;
288 }
289
290 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
291 bool enable)
292 {
293 struct smu_power_context *smu_power = &smu->smu_power;
294 struct smu_power_gate *power_gate = &smu_power->power_gate;
295 int ret = 0;
296
297 if (!smu->ppt_funcs->dpm_set_vpe_enable)
298 return 0;
299
300 if (atomic_read(&power_gate->vpe_gated) ^ enable)
301 return 0;
302
303 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
304 if (!ret)
305 atomic_set(&power_gate->vpe_gated, !enable);
306
307 return ret;
308 }
309
310 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
311 bool enable)
312 {
313 struct smu_power_context *smu_power = &smu->smu_power;
314 struct smu_power_gate *power_gate = &smu_power->power_gate;
315 int ret = 0;
316
317 if (!smu->adev->enable_umsch_mm)
318 return 0;
319
320 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
321 return 0;
322
323 if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
324 return 0;
325
326 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
327 if (!ret)
328 atomic_set(&power_gate->umsch_mm_gated, !enable);
329
330 return ret;
331 }
332
333 static int smu_set_mall_enable(struct smu_context *smu)
334 {
335 int ret = 0;
336
337 if (!smu->ppt_funcs->set_mall_enable)
338 return 0;
339
340 ret = smu->ppt_funcs->set_mall_enable(smu);
341
342 return ret;
343 }
344
345 /**
346 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
347 *
348 * @handle: smu_context pointer
349 * @block_type: the IP block to power gate/ungate
350 * @gate: to power gate if true, ungate otherwise
351 * @inst: the instance of the IP block to power gate/ungate
352 *
353 * This API uses no smu->mutex lock protection because:
354 * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
355 * caller guarantees the call is free of race conditions.
356 * 2. Or it is called on a user request to set power_dpm_force_performance_level,
357 * in which case the smu->mutex lock is already held by the parent API
358 * smu_force_performance_level in the call path.
359 */
360 static int smu_dpm_set_power_gate(void *handle,
361 uint32_t block_type,
362 bool gate,
363 int inst)
364 {
365 struct smu_context *smu = handle;
366 int ret = 0;
367
368 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
369 dev_WARN(smu->adev->dev,
370 "SMU uninitialized but power %s requested for %u!\n",
371 gate ? "gate" : "ungate", block_type);
372 return -EOPNOTSUPP;
373 }
374
375 switch (block_type) {
376 /*
377 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
378 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
379 */
380 case AMD_IP_BLOCK_TYPE_UVD:
381 case AMD_IP_BLOCK_TYPE_VCN:
382 ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
383 if (ret)
384 dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
385 gate ? "gate" : "ungate", inst);
386 break;
387 case AMD_IP_BLOCK_TYPE_GFX:
388 ret = smu_gfx_off_control(smu, gate);
389 if (ret)
390 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
391 gate ? "enable" : "disable");
392 break;
393 case AMD_IP_BLOCK_TYPE_SDMA:
394 ret = smu_powergate_sdma(smu, gate);
395 if (ret)
396 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
397 gate ? "gate" : "ungate");
398 break;
399 case AMD_IP_BLOCK_TYPE_JPEG:
400 ret = smu_dpm_set_jpeg_enable(smu, !gate);
401 if (ret)
402 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
403 gate ? "gate" : "ungate");
404 break;
405 case AMD_IP_BLOCK_TYPE_VPE:
406 ret = smu_dpm_set_vpe_enable(smu, !gate);
407 if (ret)
408 dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
409 gate ? "gate" : "ungate");
410 break;
411 default:
412 dev_err(smu->adev->dev, "Unsupported block type!\n");
413 return -EINVAL;
414 }
415
416 return ret;
417 }
418
419 /**
420 * smu_set_user_clk_dependencies - set user profile clock dependencies
421 *
422 * @smu: smu_context pointer
423 * @clk: enum smu_clk_type type
424 *
425 * Enable/Disable the clock dependency for the @clk type.
426 */
427 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
428 {
429 if (smu->adev->in_suspend)
430 return;
431
432 if (clk == SMU_MCLK) {
433 smu->user_dpm_profile.clk_dependency = 0;
434 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
435 } else if (clk == SMU_FCLK) {
436 /* MCLK takes precedence over FCLK */
437 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
438 return;
439
440 smu->user_dpm_profile.clk_dependency = 0;
441 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
442 } else if (clk == SMU_SOCCLK) {
443 /* MCLK takes precedence over SOCCLK */
444 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
445 return;
446
447 smu->user_dpm_profile.clk_dependency = 0;
448 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
449 } else
450 /* Add clk dependencies here, if any */
451 return;
452 }
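/*
 * Illustration: after a user forces SMU_MCLK, clk_dependency becomes
 * BIT(SMU_FCLK) | BIT(SMU_SOCCLK); smu_restore_dpm_user_profile() then
 * skips restoring the saved FCLK/SOCCLK masks and reapplies only the
 * MCLK setting.
 */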
453
454 /**
455 * smu_restore_dpm_user_profile - reinstate user dpm profile
456 *
457 * @smu: smu_context pointer
458 *
459 * Restore the saved user power configurations, including power limit,
460 * clock frequencies, fan control mode and fan speed.
461 */
462 static void smu_restore_dpm_user_profile(struct smu_context *smu)
463 {
464 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
465 int ret = 0;
466
467 if (!smu->adev->in_suspend)
468 return;
469
470 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
471 return;
472
473 /* Enable restore flag */
474 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
475
476 /* set the user dpm power limit */
477 if (smu->user_dpm_profile.power_limit) {
478 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
479 if (ret)
480 dev_err(smu->adev->dev, "Failed to set power limit value\n");
481 }
482
483 /* set the user dpm clock configurations */
484 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
485 enum smu_clk_type clk_type;
486
487 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
488 /*
489 * Iterate over smu clk type and force the saved user clk
490 * configs, skip if clock dependency is enabled
491 */
492 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
493 smu->user_dpm_profile.clk_mask[clk_type]) {
494 ret = smu_force_smuclk_levels(smu, clk_type,
495 smu->user_dpm_profile.clk_mask[clk_type]);
496 if (ret)
497 dev_err(smu->adev->dev,
498 "Failed to set clock type = %d\n", clk_type);
499 }
500 }
501 }
502
503 /* set the user dpm fan configurations */
504 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
505 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
506 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
507 if (ret != -EOPNOTSUPP) {
508 smu->user_dpm_profile.fan_speed_pwm = 0;
509 smu->user_dpm_profile.fan_speed_rpm = 0;
510 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
511 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
512 }
513
514 if (smu->user_dpm_profile.fan_speed_pwm) {
515 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
516 if (ret != -EOPNOTSUPP)
517 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
518 }
519
520 if (smu->user_dpm_profile.fan_speed_rpm) {
521 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
522 if (ret != -EOPNOTSUPP)
523 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
524 }
525 }
526
527 /* Restore user customized OD settings */
528 if (smu->user_dpm_profile.user_od) {
529 if (smu->ppt_funcs->restore_user_od_settings) {
530 ret = smu->ppt_funcs->restore_user_od_settings(smu);
531 if (ret)
532 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
533 }
534 }
535
536 /* Disable restore flag */
537 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
538 }
539
540 static int smu_get_power_num_states(void *handle,
541 struct pp_states_info *state_info)
542 {
543 if (!state_info)
544 return -EINVAL;
545
546 /* power states are not supported */
547 memset(state_info, 0, sizeof(struct pp_states_info));
548 state_info->nums = 1;
549 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
550
551 return 0;
552 }
553
554 bool is_support_sw_smu(struct amdgpu_device *adev)
555 {
556 /* vega20 is 11.0.2, but it's supported via the powerplay code */
557 if (adev->asic_type == CHIP_VEGA20)
558 return false;
559
560 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
561 amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
562 return true;
563
564 return false;
565 }
566
567 bool is_support_cclk_dpm(struct amdgpu_device *adev)
568 {
569 struct smu_context *smu = adev->powerplay.pp_handle;
570
571 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
572 return false;
573
574 return true;
575 }
576
577
578 static int smu_sys_get_pp_table(void *handle,
579 char **table)
580 {
581 struct smu_context *smu = handle;
582 struct smu_table_context *smu_table = &smu->smu_table;
583
584 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
585 return -EOPNOTSUPP;
586
587 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
588 return -EINVAL;
589
590 if (smu_table->hardcode_pptable)
591 *table = smu_table->hardcode_pptable;
592 else
593 *table = smu_table->power_play_table;
594
595 return smu_table->power_play_table_size;
596 }
597
598 static int smu_sys_set_pp_table(void *handle,
599 const char *buf,
600 size_t size)
601 {
602 struct smu_context *smu = handle;
603 struct smu_table_context *smu_table = &smu->smu_table;
604 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
605 int ret = 0;
606
607 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
608 return -EOPNOTSUPP;
609
610 if (header->usStructureSize != size) {
611 dev_err(smu->adev->dev, "pp table size not matched !\n");
612 return -EIO;
613 }
614
615 if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
616 kfree(smu_table->hardcode_pptable);
617 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
618 if (!smu_table->hardcode_pptable)
619 return -ENOMEM;
620 }
621
622 memcpy(smu_table->hardcode_pptable, buf, size);
623 smu_table->power_play_table = smu_table->hardcode_pptable;
624 smu_table->power_play_table_size = size;
625
626 /*
627 * A special hw_fini action (for Navi1x, DPM disablement will be
628 * skipped) may be needed for custom pptable uploading.
629 */
630 smu->uploading_custom_pp_table = true;
631
632 ret = smu_reset(smu);
633 if (ret)
634 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
635
636 smu->uploading_custom_pp_table = false;
637
638 return ret;
639 }
640
641 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
642 {
643 struct smu_feature *feature = &smu->smu_feature;
644 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
645 int ret = 0;
646
647 /*
648 * With SCPM enabled, setting the allowed feature masks (via
649 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
650 * That means there is no way to let PMFW know the settings below.
651 * Thus, we just assume all the features are allowed under
652 * such a scenario.
653 */
654 if (smu->adev->scpm_enabled) {
655 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
656 return 0;
657 }
658
659 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
660
661 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
662 SMU_FEATURE_MAX/32);
663 if (ret)
664 return ret;
665
666 bitmap_or(feature->allowed, feature->allowed,
667 (unsigned long *)allowed_feature_mask,
668 feature->feature_num);
669
670 return ret;
671 }
672
673 static int smu_set_funcs(struct amdgpu_device *adev)
674 {
675 struct smu_context *smu = adev->powerplay.pp_handle;
676
677 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
678 smu->od_enabled = true;
679
680 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
681 case IP_VERSION(11, 0, 0):
682 case IP_VERSION(11, 0, 5):
683 case IP_VERSION(11, 0, 9):
684 navi10_set_ppt_funcs(smu);
685 break;
686 case IP_VERSION(11, 0, 7):
687 case IP_VERSION(11, 0, 11):
688 case IP_VERSION(11, 0, 12):
689 case IP_VERSION(11, 0, 13):
690 sienna_cichlid_set_ppt_funcs(smu);
691 break;
692 case IP_VERSION(12, 0, 0):
693 case IP_VERSION(12, 0, 1):
694 renoir_set_ppt_funcs(smu);
695 break;
696 case IP_VERSION(11, 5, 0):
697 case IP_VERSION(11, 5, 2):
698 vangogh_set_ppt_funcs(smu);
699 break;
700 case IP_VERSION(13, 0, 1):
701 case IP_VERSION(13, 0, 3):
702 case IP_VERSION(13, 0, 8):
703 yellow_carp_set_ppt_funcs(smu);
704 break;
705 case IP_VERSION(13, 0, 4):
706 case IP_VERSION(13, 0, 11):
707 smu_v13_0_4_set_ppt_funcs(smu);
708 break;
709 case IP_VERSION(13, 0, 5):
710 smu_v13_0_5_set_ppt_funcs(smu);
711 break;
712 case IP_VERSION(11, 0, 8):
713 cyan_skillfish_set_ppt_funcs(smu);
714 break;
715 case IP_VERSION(11, 0, 2):
716 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
717 arcturus_set_ppt_funcs(smu);
718 /* OD is not supported on Arcturus */
719 smu->od_enabled = false;
720 break;
721 case IP_VERSION(13, 0, 2):
722 aldebaran_set_ppt_funcs(smu);
723 /* Enable pp_od_clk_voltage node */
724 smu->od_enabled = true;
725 break;
726 case IP_VERSION(13, 0, 0):
727 case IP_VERSION(13, 0, 10):
728 smu_v13_0_0_set_ppt_funcs(smu);
729 break;
730 case IP_VERSION(13, 0, 6):
731 case IP_VERSION(13, 0, 14):
732 case IP_VERSION(13, 0, 12):
733 smu_v13_0_6_set_ppt_funcs(smu);
734 /* Enable pp_od_clk_voltage node */
735 smu->od_enabled = true;
736 break;
737 case IP_VERSION(13, 0, 7):
738 smu_v13_0_7_set_ppt_funcs(smu);
739 break;
740 case IP_VERSION(14, 0, 0):
741 case IP_VERSION(14, 0, 1):
742 case IP_VERSION(14, 0, 4):
743 case IP_VERSION(14, 0, 5):
744 smu_v14_0_0_set_ppt_funcs(smu);
745 break;
746 case IP_VERSION(14, 0, 2):
747 case IP_VERSION(14, 0, 3):
748 smu_v14_0_2_set_ppt_funcs(smu);
749 break;
750 default:
751 return -EINVAL;
752 }
753
754 return 0;
755 }
756
757 static int smu_early_init(struct amdgpu_ip_block *ip_block)
758 {
759 struct amdgpu_device *adev = ip_block->adev;
760 struct smu_context *smu;
761 int r;
762
763 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
764 if (!smu)
765 return -ENOMEM;
766
767 smu->adev = adev;
768 smu->pm_enabled = !!amdgpu_dpm;
769 smu->is_apu = false;
770 smu->smu_baco.state = SMU_BACO_STATE_NONE;
771 smu->smu_baco.platform_support = false;
772 smu->smu_baco.maco_support = false;
773 smu->user_dpm_profile.fan_mode = -1;
774 smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
775
776 mutex_init(&smu->message_lock);
777
778 adev->powerplay.pp_handle = smu;
779 adev->powerplay.pp_funcs = &swsmu_pm_funcs;
780
781 r = smu_set_funcs(adev);
782 if (r)
783 return r;
784 return smu_init_microcode(smu);
785 }
786
787 static int smu_set_default_dpm_table(struct smu_context *smu)
788 {
789 struct amdgpu_device *adev = smu->adev;
790 struct smu_power_context *smu_power = &smu->smu_power;
791 struct smu_power_gate *power_gate = &smu_power->power_gate;
792 int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
793 int ret = 0;
794
795 if (!smu->ppt_funcs->set_default_dpm_table)
796 return 0;
797
798 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
799 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
800 vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
801 }
802 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
803 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
804
805 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
806 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
807 ret = smu_dpm_set_vcn_enable(smu, true, i);
808 if (ret)
809 return ret;
810 }
811 }
812
813 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
814 ret = smu_dpm_set_jpeg_enable(smu, true);
815 if (ret)
816 goto err_out;
817 }
818
819 ret = smu->ppt_funcs->set_default_dpm_table(smu);
820 if (ret)
821 dev_err(smu->adev->dev,
822 "Failed to setup default dpm clock tables!\n");
823
824 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
825 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
826 err_out:
827 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
828 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
829 smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
830 }
831
832 return ret;
833 }
834
835 static int smu_apply_default_config_table_settings(struct smu_context *smu)
836 {
837 struct amdgpu_device *adev = smu->adev;
838 int ret = 0;
839
840 ret = smu_get_default_config_table_settings(smu,
841 &adev->pm.config_table);
842 if (ret)
843 return ret;
844
845 return smu_set_config_table(smu, &adev->pm.config_table);
846 }
847
848 static int smu_late_init(struct amdgpu_ip_block *ip_block)
849 {
850 struct amdgpu_device *adev = ip_block->adev;
851 struct smu_context *smu = adev->powerplay.pp_handle;
852 int ret = 0;
853
854 smu_set_fine_grain_gfx_freq_parameters(smu);
855
856 if (!smu->pm_enabled)
857 return 0;
858
859 ret = smu_post_init(smu);
860 if (ret) {
861 dev_err(adev->dev, "Failed to post smu init!\n");
862 return ret;
863 }
864
865 /*
866 * Explicitly notify PMFW of the power mode the system is in, since
867 * PMFW may have booted the ASIC in a different mode.
868 * For those supporting ACDC switch via gpio, PMFW will
869 * handle the switch automatically. Driver involvement
870 * is unnecessary.
871 */
872 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
873 smu_set_ac_dc(smu);
874
875 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
876 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
877 return 0;
878
879 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
880 ret = smu_set_default_od_settings(smu);
881 if (ret) {
882 dev_err(adev->dev, "Failed to setup default OD settings!\n");
883 return ret;
884 }
885 }
886
887 ret = smu_populate_umd_state_clk(smu);
888 if (ret) {
889 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
890 return ret;
891 }
892
893 ret = smu_get_asic_power_limits(smu,
894 &smu->current_power_limit,
895 &smu->default_power_limit,
896 &smu->max_power_limit,
897 &smu->min_power_limit);
898 if (ret) {
899 dev_err(adev->dev, "Failed to get asic power limits!\n");
900 return ret;
901 }
902
903 if (!amdgpu_sriov_vf(adev))
904 smu_get_unique_id(smu);
905
906 smu_get_fan_parameters(smu);
907
908 smu_handle_task(smu,
909 smu->smu_dpm.dpm_level,
910 AMD_PP_TASK_COMPLETE_INIT);
911
912 ret = smu_apply_default_config_table_settings(smu);
913 if (ret && (ret != -EOPNOTSUPP)) {
914 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
915 return ret;
916 }
917
918 smu_restore_dpm_user_profile(smu);
919
920 return 0;
921 }
922
923 static int smu_init_fb_allocations(struct smu_context *smu)
924 {
925 struct amdgpu_device *adev = smu->adev;
926 struct smu_table_context *smu_table = &smu->smu_table;
927 struct smu_table *tables = smu_table->tables;
928 struct smu_table *driver_table = &(smu_table->driver_table);
929 uint32_t max_table_size = 0;
930 int ret, i;
931
932 /* VRAM allocation for tool table */
933 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
934 ret = amdgpu_bo_create_kernel(adev,
935 tables[SMU_TABLE_PMSTATUSLOG].size,
936 tables[SMU_TABLE_PMSTATUSLOG].align,
937 tables[SMU_TABLE_PMSTATUSLOG].domain,
938 &tables[SMU_TABLE_PMSTATUSLOG].bo,
939 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
940 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
941 if (ret) {
942 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
943 return ret;
944 }
945 }
946
947 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
948 /* VRAM allocation for driver table */
949 for (i = 0; i < SMU_TABLE_COUNT; i++) {
950 if (tables[i].size == 0)
951 continue;
952
953 /* If one of the tables has VRAM domain restriction, keep it in
954 * VRAM
955 */
956 if ((tables[i].domain &
957 (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
958 AMDGPU_GEM_DOMAIN_VRAM)
959 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
960
961 if (i == SMU_TABLE_PMSTATUSLOG)
962 continue;
963
964 if (max_table_size < tables[i].size)
965 max_table_size = tables[i].size;
966 }
967
968 driver_table->size = max_table_size;
969 driver_table->align = PAGE_SIZE;
970
971 ret = amdgpu_bo_create_kernel(adev,
972 driver_table->size,
973 driver_table->align,
974 driver_table->domain,
975 &driver_table->bo,
976 &driver_table->mc_address,
977 &driver_table->cpu_addr);
978 if (ret) {
979 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
980 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
981 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
982 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
983 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
984 }
985
986 return ret;
987 }
988
989 static int smu_fini_fb_allocations(struct smu_context *smu)
990 {
991 struct smu_table_context *smu_table = &smu->smu_table;
992 struct smu_table *tables = smu_table->tables;
993 struct smu_table *driver_table = &(smu_table->driver_table);
994
995 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
996 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
997 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
998 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
999
1000 amdgpu_bo_free_kernel(&driver_table->bo,
1001 &driver_table->mc_address,
1002 &driver_table->cpu_addr);
1003
1004 return 0;
1005 }
1006
1007 /**
1008 * smu_alloc_memory_pool - allocate memory pool in the system memory
1009 *
1010 * @smu: smu_context pointer
1011 *
1012 * This memory pool is allocated for SMC use; the SetSystemVirtualDramAddr
1013 * and DramLogSetDramAddr messages notify the PMFW of its location.
1014 *
1015 * Returns 0 on success, error on failure.
1016 */
1017 static int smu_alloc_memory_pool(struct smu_context *smu)
1018 {
1019 struct amdgpu_device *adev = smu->adev;
1020 struct smu_table_context *smu_table = &smu->smu_table;
1021 struct smu_table *memory_pool = &smu_table->memory_pool;
1022 uint64_t pool_size = smu->pool_size;
1023 int ret = 0;
1024
1025 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1026 return ret;
1027
1028 memory_pool->size = pool_size;
1029 memory_pool->align = PAGE_SIZE;
1030 memory_pool->domain =
1031 (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
1032 AMDGPU_GEM_DOMAIN_VRAM :
1033 AMDGPU_GEM_DOMAIN_GTT;
1034
1035 switch (pool_size) {
1036 case SMU_MEMORY_POOL_SIZE_256_MB:
1037 case SMU_MEMORY_POOL_SIZE_512_MB:
1038 case SMU_MEMORY_POOL_SIZE_1_GB:
1039 case SMU_MEMORY_POOL_SIZE_2_GB:
1040 ret = amdgpu_bo_create_kernel(adev,
1041 memory_pool->size,
1042 memory_pool->align,
1043 memory_pool->domain,
1044 &memory_pool->bo,
1045 &memory_pool->mc_address,
1046 &memory_pool->cpu_addr);
1047 if (ret)
1048 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
1049 break;
1050 default:
1051 break;
1052 }
1053
1054 return ret;
1055 }
1056
1057 static int smu_free_memory_pool(struct smu_context *smu)
1058 {
1059 struct smu_table_context *smu_table = &smu->smu_table;
1060 struct smu_table *memory_pool = &smu_table->memory_pool;
1061
1062 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1063 return 0;
1064
1065 amdgpu_bo_free_kernel(&memory_pool->bo,
1066 &memory_pool->mc_address,
1067 &memory_pool->cpu_addr);
1068
1069 memset(memory_pool, 0, sizeof(struct smu_table));
1070
1071 return 0;
1072 }
1073
1074 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1075 {
1076 struct smu_table_context *smu_table = &smu->smu_table;
1077 struct smu_table *dummy_read_1_table =
1078 &smu_table->dummy_read_1_table;
1079 struct amdgpu_device *adev = smu->adev;
1080 int ret = 0;
1081
1082 if (!dummy_read_1_table->size)
1083 return 0;
1084
1085 ret = amdgpu_bo_create_kernel(adev,
1086 dummy_read_1_table->size,
1087 dummy_read_1_table->align,
1088 dummy_read_1_table->domain,
1089 &dummy_read_1_table->bo,
1090 &dummy_read_1_table->mc_address,
1091 &dummy_read_1_table->cpu_addr);
1092 if (ret)
1093 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1094
1095 return ret;
1096 }
1097
1098 static void smu_free_dummy_read_table(struct smu_context *smu)
1099 {
1100 struct smu_table_context *smu_table = &smu->smu_table;
1101 struct smu_table *dummy_read_1_table =
1102 &smu_table->dummy_read_1_table;
1103
1104
1105 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1106 &dummy_read_1_table->mc_address,
1107 &dummy_read_1_table->cpu_addr);
1108
1109 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1110 }
1111
1112 static int smu_smc_table_sw_init(struct smu_context *smu)
1113 {
1114 int ret;
1115
1116 /**
1117 * Create smu_table structure, and init smc tables such as
1118 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1119 */
1120 ret = smu_init_smc_tables(smu);
1121 if (ret) {
1122 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1123 return ret;
1124 }
1125
1126 /**
1127 * Create the smu_power_context structure, and allocate the smu_dpm_context
1128 * and other context data needed to fill smu_power_context.
1129 */
1130 ret = smu_init_power(smu);
1131 if (ret) {
1132 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
1133 return ret;
1134 }
1135
1136 /*
1137 * allocate vram bos to store smc table contents.
1138 */
1139 ret = smu_init_fb_allocations(smu);
1140 if (ret)
1141 return ret;
1142
1143 ret = smu_alloc_memory_pool(smu);
1144 if (ret)
1145 return ret;
1146
1147 ret = smu_alloc_dummy_read_table(smu);
1148 if (ret)
1149 return ret;
1150
1151 ret = smu_i2c_init(smu);
1152 if (ret)
1153 return ret;
1154
1155 return 0;
1156 }
1157
1158 static int smu_smc_table_sw_fini(struct smu_context *smu)
1159 {
1160 int ret;
1161
1162 smu_i2c_fini(smu);
1163
1164 smu_free_dummy_read_table(smu);
1165
1166 ret = smu_free_memory_pool(smu);
1167 if (ret)
1168 return ret;
1169
1170 ret = smu_fini_fb_allocations(smu);
1171 if (ret)
1172 return ret;
1173
1174 ret = smu_fini_power(smu);
1175 if (ret) {
1176 dev_err(smu->adev->dev, "Failed to fini smu power!\n");
1177 return ret;
1178 }
1179
1180 ret = smu_fini_smc_tables(smu);
1181 if (ret) {
1182 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
1183 return ret;
1184 }
1185
1186 return 0;
1187 }
1188
1189 static void smu_throttling_logging_work_fn(struct work_struct *work)
1190 {
1191 struct smu_context *smu = container_of(work, struct smu_context,
1192 throttling_logging_work);
1193
1194 smu_log_thermal_throttling(smu);
1195 }
1196
1197 static void smu_interrupt_work_fn(struct work_struct *work)
1198 {
1199 struct smu_context *smu = container_of(work, struct smu_context,
1200 interrupt_work);
1201
1202 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1203 smu->ppt_funcs->interrupt_work(smu);
1204 }
1205
1206 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1207 {
1208 struct smu_context *smu =
1209 container_of(work, struct smu_context, swctf_delayed_work.work);
1210 struct smu_temperature_range *range =
1211 &smu->thermal_range;
1212 struct amdgpu_device *adev = smu->adev;
1213 uint32_t hotspot_tmp, size;
1214
1215 /*
1216 * If the hotspot temperature is confirmed to be below the SW CTF setting point
1217 * after the enforced delay, nothing will be done.
1218 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1219 */
1220 if (range->software_shutdown_temp &&
1221 smu->ppt_funcs->read_sensor &&
1222 !smu->ppt_funcs->read_sensor(smu,
1223 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1224 &hotspot_tmp,
1225 &size) &&
1226 hotspot_tmp / 1000 < range->software_shutdown_temp)
1227 return;
1228
1229 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1230 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1231 orderly_poweroff(true);
1232 }
1233
1234 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1235 {
1236 struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1237 struct smu_dpm_policy_ctxt *policy_ctxt;
1238 struct smu_dpm_policy *policy;
1239
1240 policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1241 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1242 if (policy)
1243 policy->current_level = XGMI_PLPD_DEFAULT;
1244 return;
1245 }
1246
1247 /* PMFW put PLPD into default policy after enabling the feature */
1248 if (smu_feature_is_enabled(smu,
1249 SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1250 if (policy)
1251 policy->current_level = XGMI_PLPD_DEFAULT;
1252 } else {
1253 policy_ctxt = dpm_ctxt->dpm_policies;
1254 if (policy_ctxt)
1255 policy_ctxt->policy_mask &=
1256 ~BIT(PP_PM_POLICY_XGMI_PLPD);
1257 }
1258 }
1259
1260 static void smu_init_power_profile(struct smu_context *smu)
1261 {
1262 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
1263 smu->power_profile_mode =
1264 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1265 smu_power_profile_mode_get(smu, smu->power_profile_mode);
1266 }
1267
1268 static int smu_sw_init(struct amdgpu_ip_block *ip_block)
1269 {
1270 struct amdgpu_device *adev = ip_block->adev;
1271 struct smu_context *smu = adev->powerplay.pp_handle;
1272 int i, ret;
1273
1274 smu->pool_size = adev->pm.smu_prv_buffer_size;
1275 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1276 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1277 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1278
1279 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1280 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1281 atomic64_set(&smu->throttle_int_counter, 0);
1282 smu->watermarks_bitmap = 0;
1283
1284 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1285 atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
1286 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1287 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1288 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1289
1290 smu_init_power_profile(smu);
1291 smu->display_config = &adev->pm.pm_display_cfg;
1292
1293 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1294 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1295
1296 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1297 smu_swctf_delayed_work_handler);
1298
1299 ret = smu_smc_table_sw_init(smu);
1300 if (ret) {
1301 dev_err(adev->dev, "Failed to sw init smc table!\n");
1302 return ret;
1303 }
1304
1305 /* get boot_values from vbios to set revision, gfxclk, etc. */
1306 ret = smu_get_vbios_bootup_values(smu);
1307 if (ret) {
1308 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1309 return ret;
1310 }
1311
1312 ret = smu_init_pptable_microcode(smu);
1313 if (ret) {
1314 dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1315 return ret;
1316 }
1317
1318 ret = smu_register_irq_handler(smu);
1319 if (ret) {
1320 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1321 return ret;
1322 }
1323
1324 /* If there is no way to query fan control mode, fan control is not supported */
1325 if (!smu->ppt_funcs->get_fan_control_mode)
1326 smu->adev->pm.no_fan = true;
1327
1328 return 0;
1329 }
1330
1331 static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
1332 {
1333 struct amdgpu_device *adev = ip_block->adev;
1334 struct smu_context *smu = adev->powerplay.pp_handle;
1335 int ret;
1336
1337 ret = smu_smc_table_sw_fini(smu);
1338 if (ret) {
1339 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1340 return ret;
1341 }
1342
1343 if (smu->custom_profile_params) {
1344 kfree(smu->custom_profile_params);
1345 smu->custom_profile_params = NULL;
1346 }
1347
1348 smu_fini_microcode(smu);
1349
1350 return 0;
1351 }
1352
1353 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1354 {
1355 struct amdgpu_device *adev = smu->adev;
1356 struct smu_temperature_range *range =
1357 &smu->thermal_range;
1358 int ret = 0;
1359
1360 if (!smu->ppt_funcs->get_thermal_temperature_range)
1361 return 0;
1362
1363 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1364 if (ret)
1365 return ret;
1366
1367 adev->pm.dpm.thermal.min_temp = range->min;
1368 adev->pm.dpm.thermal.max_temp = range->max;
1369 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1370 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1371 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1372 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1373 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1374 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1375 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1376
1377 return ret;
1378 }
1379
1380 /**
1381 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1382 *
1383 * @smu: smu_context pointer
1384 *
1385 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1386 * Returns 0 on success, error on failure.
1387 */
1388 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1389 {
1390 struct wbrf_ranges_in_out wbrf_exclusion = {0};
1391 struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1392 struct amdgpu_device *adev = smu->adev;
1393 uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1394 uint64_t start, end;
1395 int ret, i, j;
1396
1397 ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1398 if (ret) {
1399 dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1400 return ret;
1401 }
1402
1403 /*
1404 * The exclusion ranges array we got might be filled with holes and duplicate
1405 * entries. For example:
1406 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1407 * We need to do some sorting to eliminate those holes and duplicate entries.
1408 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1409 */
1410 for (i = 0; i < num_of_wbrf_ranges; i++) {
1411 start = wifi_bands[i].start;
1412 end = wifi_bands[i].end;
1413
1414 /* get the last valid entry to fill the intermediate hole */
1415 if (!start && !end) {
1416 for (j = num_of_wbrf_ranges - 1; j > i; j--)
1417 if (wifi_bands[j].start && wifi_bands[j].end)
1418 break;
1419
1420 /* no valid entry left */
1421 if (j <= i)
1422 break;
1423
1424 start = wifi_bands[i].start = wifi_bands[j].start;
1425 end = wifi_bands[i].end = wifi_bands[j].end;
1426 wifi_bands[j].start = 0;
1427 wifi_bands[j].end = 0;
1428 num_of_wbrf_ranges = j;
1429 }
1430
1431 /* eliminate duplicate entries */
1432 for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1433 if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1434 wifi_bands[j].start = 0;
1435 wifi_bands[j].end = 0;
1436 }
1437 }
1438 }
1439
1440 /* Send the sorted wifi_bands to PMFW */
1441 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1442 /* Try to set the wifi_bands again */
1443 if (unlikely(ret == -EBUSY)) {
1444 mdelay(5);
1445 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1446 }
1447
1448 return ret;
1449 }
1450
1451 /**
1452 * smu_wbrf_event_handler - handle notify events
1453 *
1454 * @nb: notifier block
1455 * @action: event type
1456 * @_arg: event data
1457 *
1458 * Calls relevant amdgpu function in response to wbrf event
1459 * notification from kernel.
1460 */
1461 static int smu_wbrf_event_handler(struct notifier_block *nb,
1462 unsigned long action, void *_arg)
1463 {
1464 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1465
1466 switch (action) {
1467 case WBRF_CHANGED:
1468 schedule_delayed_work(&smu->wbrf_delayed_work,
1469 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1470 break;
1471 default:
1472 return NOTIFY_DONE;
1473 }
1474
1475 return NOTIFY_OK;
1476 }
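/*
 * The delayed work effectively debounces the notifications: a burst of
 * WBRF_CHANGED events within SMU_WBRF_EVENT_HANDLING_PACE ms collapses
 * into a single exclusion-range update, since schedule_delayed_work()
 * does not re-queue while the work is already pending.
 */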
1477
1478 /**
1479 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1480 *
1481 * @work: struct work_struct pointer
1482 *
1483 * The notification flood is over; the driver now consumes the latest exclusion ranges.
1484 */
1485 static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1486 {
1487 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1488
1489 smu_wbrf_handle_exclusion_ranges(smu);
1490 }
1491
1492 /**
1493 * smu_wbrf_support_check - check wbrf support
1494 *
1495 * @smu: smu_context pointer
1496 *
1497 * Verifies via the ACPI interface whether wbrf is supported.
1498 */
1499 static void smu_wbrf_support_check(struct smu_context *smu)
1500 {
1501 struct amdgpu_device *adev = smu->adev;
1502
1503 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1504 acpi_amd_wbrf_supported_consumer(adev->dev);
1505
1506 if (smu->wbrf_supported)
1507 dev_info(adev->dev, "RF interference mitigation is supported\n");
1508 }
1509
1510 /**
1511 * smu_wbrf_init - init driver wbrf support
1512 *
1513 * @smu: smu_context pointer
1514 *
1515 * Verifies the AMD ACPI interfaces and registers with the wbrf
1516 * notifier chain if wbrf feature is supported.
1517 * Returns 0 on success, error on failure.
1518 */
1519 static int smu_wbrf_init(struct smu_context *smu)
1520 {
1521 int ret;
1522
1523 if (!smu->wbrf_supported)
1524 return 0;
1525
1526 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1527
1528 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1529 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1530 if (ret)
1531 return ret;
1532
1533 /*
1534 * Some wifiband exclusion ranges may already be in place
1535 * before our driver is loaded. Schedule the work below to make
1536 * sure our driver is aware of those exclusion ranges.
1537 */
1538 schedule_delayed_work(&smu->wbrf_delayed_work,
1539 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1540
1541 return 0;
1542 }
1543
1544 /**
1545 * smu_wbrf_fini - tear down driver wbrf support
1546 *
1547 * @smu: smu_context pointer
1548 *
1549 * Unregisters with the wbrf notifier chain.
1550 */
1551 static void smu_wbrf_fini(struct smu_context *smu)
1552 {
1553 if (!smu->wbrf_supported)
1554 return;
1555
1556 amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1557
1558 cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1559 }
1560
1561 static int smu_smc_hw_setup(struct smu_context *smu)
1562 {
1563 struct smu_feature *feature = &smu->smu_feature;
1564 struct amdgpu_device *adev = smu->adev;
1565 uint8_t pcie_gen = 0, pcie_width = 0;
1566 uint64_t features_supported;
1567 int ret = 0;
1568
1569 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1570 case IP_VERSION(11, 0, 7):
1571 case IP_VERSION(11, 0, 11):
1572 case IP_VERSION(11, 5, 0):
1573 case IP_VERSION(11, 5, 2):
1574 case IP_VERSION(11, 0, 12):
1575 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1576 dev_info(adev->dev, "dpm has been enabled\n");
1577 ret = smu_system_features_control(smu, true);
1578 if (ret)
1579 dev_err(adev->dev, "Failed system features control!\n");
1580 return ret;
1581 }
1582 break;
1583 default:
1584 break;
1585 }
1586
1587 ret = smu_init_display_count(smu, 0);
1588 if (ret) {
1589 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1590 return ret;
1591 }
1592
1593 ret = smu_set_driver_table_location(smu);
1594 if (ret) {
1595 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1596 return ret;
1597 }
1598
1599 /*
1600 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1601 */
1602 ret = smu_set_tool_table_location(smu);
1603 if (ret) {
1604 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1605 return ret;
1606 }
1607
1608 /*
1609 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1610 * notify the PMFW of the pool location.
1611 */
1612 ret = smu_notify_memory_pool_location(smu);
1613 if (ret) {
1614 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1615 return ret;
1616 }
1617
1618 /*
1619 * It is assumed the pptable used before runpm is the same as
1620 * the one used afterwards. Thus, we can reuse the stored
1621 * copy and do not need to set up the pptable again.
1622 */
1623 if (!adev->in_runpm) {
1624 ret = smu_setup_pptable(smu);
1625 if (ret) {
1626 dev_err(adev->dev, "Failed to setup pptable!\n");
1627 return ret;
1628 }
1629 }
1630
1631 /* smu_dump_pptable(smu); */
1632
1633 /*
1634 * With SCPM enabled, PSP is responsible for transferring the PPTable
1635 * (to SMU). Driver involvement is neither needed nor permitted.
1636 */
1637 if (!adev->scpm_enabled) {
1638 /*
1639 * Copy pptable bo in the vram to smc with SMU MSGs such as
1640 * SetDriverDramAddr and TransferTableDram2Smu.
1641 */
1642 ret = smu_write_pptable(smu);
1643 if (ret) {
1644 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1645 return ret;
1646 }
1647 }
1648
1649 /* issue Run*Btc msg */
1650 ret = smu_run_btc(smu);
1651 if (ret)
1652 return ret;
1653
1654 /* Enable UclkShadow if wbrf is supported */
1655 if (smu->wbrf_supported) {
1656 ret = smu_enable_uclk_shadow(smu, true);
1657 if (ret) {
1658 dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1659 return ret;
1660 }
1661 }
1662
1663 /*
1664 * With SCPM enabled, these actions (and relevant messages) are
1665 * neither needed nor permitted.
1666 */
1667 if (!adev->scpm_enabled) {
1668 ret = smu_feature_set_allowed_mask(smu);
1669 if (ret) {
1670 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1671 return ret;
1672 }
1673 }
1674
1675 ret = smu_system_features_control(smu, true);
1676 if (ret) {
1677 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1678 return ret;
1679 }
1680
1681 smu_init_xgmi_plpd_mode(smu);
1682
1683 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1684 if (ret) {
1685 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1686 return ret;
1687 }
1688 bitmap_copy(feature->supported,
1689 (unsigned long *)&features_supported,
1690 feature->feature_num);
1691
1692 if (!smu_is_dpm_running(smu))
1693 dev_info(adev->dev, "dpm has been disabled\n");
1694
1695 /*
1696 * Set initial values (obtained from vbios) in the dpm tables context, such as
1697 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
1698 * type of clock.
1699 */
1700 ret = smu_set_default_dpm_table(smu);
1701 if (ret) {
1702 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1703 return ret;
1704 }
1705
1706 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
1707 pcie_gen = 4;
1708 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1709 pcie_gen = 3;
1710 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1711 pcie_gen = 2;
1712 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1713 pcie_gen = 1;
1714 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1715 pcie_gen = 0;
1716
1717 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1718 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1719 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
1720 */
1721 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
1722 pcie_width = 7;
1723 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1724 pcie_width = 6;
1725 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1726 pcie_width = 5;
1727 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1728 pcie_width = 4;
1729 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1730 pcie_width = 3;
1731 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1732 pcie_width = 2;
1733 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1734 pcie_width = 1;
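/*
 * Example (illustrative): on a platform advertising Gen4 x16 support,
 * the mappings above yield pcie_gen = 3 and pcie_width = 6 before they
 * are handed to smu_update_pcie_parameters() below.
 */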
1735 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1736 if (ret) {
1737 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1738 return ret;
1739 }
1740
1741 ret = smu_get_thermal_temperature_range(smu);
1742 if (ret) {
1743 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1744 return ret;
1745 }
1746
1747 ret = smu_enable_thermal_alert(smu);
1748 if (ret) {
1749 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1750 return ret;
1751 }
1752
1753 ret = smu_notify_display_change(smu);
1754 if (ret) {
1755 dev_err(adev->dev, "Failed to notify display change!\n");
1756 return ret;
1757 }
1758
1759 /*
1760 * Set min deep sleep dcefclk to the bootup value from vbios via
1761 * SetMinDeepSleepDcefclk MSG.
1762 */
1763 ret = smu_set_min_dcef_deep_sleep(smu,
1764 smu->smu_table.boot_values.dcefclk / 100);
1765 if (ret) {
1766 dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1767 return ret;
1768 }
1769
1770 /* Init wbrf support. Properly setup the notifier */
1771 ret = smu_wbrf_init(smu);
1772 if (ret)
1773 dev_err(adev->dev, "Error during wbrf init call\n");
1774
1775 return ret;
1776 }
1777
1778 static int smu_start_smc_engine(struct smu_context *smu)
1779 {
1780 struct amdgpu_device *adev = smu->adev;
1781 int ret = 0;
1782
1783 smu->smc_fw_state = SMU_FW_INIT;
1784
1785 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1786 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1787 if (smu->ppt_funcs->load_microcode) {
1788 ret = smu->ppt_funcs->load_microcode(smu);
1789 if (ret)
1790 return ret;
1791 }
1792 }
1793 }
1794
1795 if (smu->ppt_funcs->check_fw_status) {
1796 ret = smu->ppt_funcs->check_fw_status(smu);
1797 if (ret) {
1798 dev_err(adev->dev, "SMC is not ready\n");
1799 return ret;
1800 }
1801 }
1802
1803 /*
1804 * Send the GetDriverIfVersion message to check whether the returned value
1805 * matches the DRIVER_IF_VERSION in the smc header.
1806 */
1807 ret = smu_check_fw_version(smu);
1808 if (ret)
1809 return ret;
1810
1811 return ret;
1812 }
1813
1814 static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1815 {
1816 int i, ret;
1817 struct amdgpu_device *adev = ip_block->adev;
1818 struct smu_context *smu = adev->powerplay.pp_handle;
1819
1820 if (amdgpu_sriov_multi_vf_mode(adev)) {
1821 smu->pm_enabled = false;
1822 return 0;
1823 }
1824
1825 ret = smu_start_smc_engine(smu);
1826 if (ret) {
1827 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1828 return ret;
1829 }
1830
1831 /*
1832 * Check whether wbrf is supported. This needs to be done
1833 * before SMU setup starts since part of SMU configuration
1834 * relies on this.
1835 */
1836 smu_wbrf_support_check(smu);
1837
1838 if (smu->is_apu) {
1839 ret = smu_set_gfx_imu_enable(smu);
1840 if (ret)
1841 return ret;
1842 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1843 smu_dpm_set_vcn_enable(smu, true, i);
1844 smu_dpm_set_jpeg_enable(smu, true);
1845 smu_dpm_set_vpe_enable(smu, true);
1846 smu_dpm_set_umsch_mm_enable(smu, true);
1847 smu_set_mall_enable(smu);
1848 smu_set_gfx_cgpg(smu, true);
1849 }
1850
1851 if (!smu->pm_enabled)
1852 return 0;
1853
1854 ret = smu_get_driver_allowed_feature_mask(smu);
1855 if (ret)
1856 return ret;
1857
1858 ret = smu_smc_hw_setup(smu);
1859 if (ret) {
1860 dev_err(adev->dev, "Failed to setup smc hw!\n");
1861 return ret;
1862 }
1863
1864 /*
1865 * Retrieve the maximum sustainable clocks here because:
1866 * 1. It is not needed on resume (from S3).
1867 * 2. DAL settings come between .hw_init and .late_init of SMU,
1868 *    and DAL needs to know the maximum sustainable clocks, so
1869 *    this cannot be deferred to .late_init().
1870 */
1871 ret = smu_init_max_sustainable_clocks(smu);
1872 if (ret) {
1873 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1874 return ret;
1875 }
1876
1877 adev->pm.dpm_enabled = true;
1878
1879 dev_info(adev->dev, "SMU is initialized successfully!\n");
1880
1881 return 0;
1882 }
1883
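/*
 * Descriptive summary (added note): the cases below decide how DPM is torn
 * down - newer PMFW handles it by itself, custom pptable uploads and BACO
 * entry are special-cased, and otherwise all features except BACO (when
 * BACO is in use) are disabled explicitly before RLC is notified/stopped.
 */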
1884 static int smu_disable_dpms(struct smu_context *smu)
1885 {
1886 struct amdgpu_device *adev = smu->adev;
1887 int ret = 0;
1888 bool use_baco = !smu->is_apu &&
1889 ((amdgpu_in_reset(adev) &&
1890 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1891 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1892
1893 /*
1894 * For SMU 13.0.0, 13.0.7 and the other ASICs listed below, PMFW will handle the DPM features
1895 * (disablement or otherwise) properly on suspend/reset/unload. Driver involvement may cause unexpected issues.
1896 */
1897 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1898 case IP_VERSION(13, 0, 0):
1899 case IP_VERSION(13, 0, 7):
1900 case IP_VERSION(13, 0, 10):
1901 case IP_VERSION(14, 0, 2):
1902 case IP_VERSION(14, 0, 3):
1903 return 0;
1904 default:
1905 break;
1906 }
1907
1908 /*
1909 * For custom pptable uploading, skip the DPM features
1910 * disable process on Navi1x ASICs.
1911 * - The gfx related features are under the control of
1912 * RLC on those ASICs, so RLC reinitialization would be
1913 * needed to re-enable them, which would take much more
1914 * effort.
1915 *
1916 * - SMU firmware can handle the DPM re-enablement
1917 * properly.
1918 */
1919 if (smu->uploading_custom_pp_table) {
1920 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1921 case IP_VERSION(11, 0, 0):
1922 case IP_VERSION(11, 0, 5):
1923 case IP_VERSION(11, 0, 9):
1924 case IP_VERSION(11, 0, 7):
1925 case IP_VERSION(11, 0, 11):
1926 case IP_VERSION(11, 5, 0):
1927 case IP_VERSION(11, 5, 2):
1928 case IP_VERSION(11, 0, 12):
1929 case IP_VERSION(11, 0, 13):
1930 return 0;
1931 default:
1932 break;
1933 }
1934 }
1935
1936 /*
1937 * For Sienna_Cichlid and the other ASICs listed below, PMFW will handle the feature disablement
1938 * properly on BACO entry. Driver involvement is unnecessary.
1939 */
1940 if (use_baco) {
1941 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1942 case IP_VERSION(11, 0, 7):
1943 case IP_VERSION(11, 0, 0):
1944 case IP_VERSION(11, 0, 5):
1945 case IP_VERSION(11, 0, 9):
1946 case IP_VERSION(13, 0, 7):
1947 return 0;
1948 default:
1949 break;
1950 }
1951 }
1952
1953 /*
1954 * For GFX11 and subsequent APUs, PMFW will handle the feature disablement properly
1955 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1956 */
1957 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
1958 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
1959 return 0;
1960
1961 /*
1962 * For gpu reset, runpm and hibernation through BACO,
1963 * BACO feature has to be kept enabled.
1964 */
1965 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1966 ret = smu_disable_all_features_with_exception(smu,
1967 SMU_FEATURE_BACO_BIT);
1968 if (ret)
1969 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1970 } else {
1971 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1972 if (!adev->scpm_enabled) {
1973 ret = smu_system_features_control(smu, false);
1974 if (ret)
1975 dev_err(adev->dev, "Failed to disable smu features.\n");
1976 }
1977 }
1978
1979 /* Notify SMU that RLC is going to be off and stop RLC/SMU interaction;
1980 * otherwise SMU will hang while interacting with RLC if RLC is halted.
1981 * This is a workaround for the Vangogh ASIC to fix the SMU hang issue.
1982 */
1983 ret = smu_notify_rlc_state(smu, false);
1984 if (ret) {
1985 dev_err(adev->dev, "Fail to notify rlc status!\n");
1986 return ret;
1987 }
1988
1989 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1990 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1991 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1992 adev->gfx.rlc.funcs->stop(adev);
1993
1994 return ret;
1995 }
1996
1997 static int smu_smc_hw_cleanup(struct smu_context *smu)
1998 {
1999 struct amdgpu_device *adev = smu->adev;
2000 int ret = 0;
2001
2002 smu_wbrf_fini(smu);
2003
2004 cancel_work_sync(&smu->throttling_logging_work);
2005 cancel_work_sync(&smu->interrupt_work);
2006
2007 ret = smu_disable_thermal_alert(smu);
2008 if (ret) {
2009 dev_err(adev->dev, "Fail to disable thermal alert!\n");
2010 return ret;
2011 }
2012
2013 cancel_delayed_work_sync(&smu->swctf_delayed_work);
2014
2015 ret = smu_disable_dpms(smu);
2016 if (ret) {
2017 dev_err(adev->dev, "Fail to disable dpm features!\n");
2018 return ret;
2019 }
2020
2021 return 0;
2022 }
2023
2024 static int smu_reset_mp1_state(struct smu_context *smu)
2025 {
2026 struct amdgpu_device *adev = smu->adev;
2027 int ret = 0;
2028
2029 if ((!adev->in_runpm) && (!adev->in_suspend) &&
2030 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2031 IP_VERSION(13, 0, 10) &&
2032 !amdgpu_device_has_display_hardware(adev))
2033 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2034
2035 return ret;
2036 }
2037
2038 static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2039 {
2040 struct amdgpu_device *adev = ip_block->adev;
2041 struct smu_context *smu = adev->powerplay.pp_handle;
2042 int i, ret;
2043
2044 if (amdgpu_sriov_multi_vf_mode(adev))
2045 return 0;
2046
2047 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2048 smu_dpm_set_vcn_enable(smu, false, i);
2049 adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
2050 }
2051 smu_dpm_set_jpeg_enable(smu, false);
2052 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2053 smu_dpm_set_vpe_enable(smu, false);
2054 smu_dpm_set_umsch_mm_enable(smu, false);
2055
2056 if (!smu->pm_enabled)
2057 return 0;
2058
2059 adev->pm.dpm_enabled = false;
2060
2061 ret = smu_smc_hw_cleanup(smu);
2062 if (ret)
2063 return ret;
2064
2065 ret = smu_reset_mp1_state(smu);
2066 if (ret)
2067 return ret;
2068
2069 return 0;
2070 }
2071
2072 static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2073 {
2074 struct amdgpu_device *adev = ip_block->adev;
2075 struct smu_context *smu = adev->powerplay.pp_handle;
2076
2077 kfree(smu);
2078 }
2079
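/*
 * Descriptive note: a full SMU reset simply tears the SMC IP block down via
 * smu_hw_fini() and brings it back through the same smu_hw_init()/
 * smu_late_init() path used at driver load.
 */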
2080 static int smu_reset(struct smu_context *smu)
2081 {
2082 struct amdgpu_device *adev = smu->adev;
2083 struct amdgpu_ip_block *ip_block;
2084 int ret;
2085
2086 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2087 if (!ip_block)
2088 return -EINVAL;
2089
2090 ret = smu_hw_fini(ip_block);
2091 if (ret)
2092 return ret;
2093
2094 ret = smu_hw_init(ip_block);
2095 if (ret)
2096 return ret;
2097
2098 ret = smu_late_init(ip_block);
2099 if (ret)
2100 return ret;
2101
2102 return 0;
2103 }
2104
2105 static int smu_suspend(struct amdgpu_ip_block *ip_block)
2106 {
2107 struct amdgpu_device *adev = ip_block->adev;
2108 struct smu_context *smu = adev->powerplay.pp_handle;
2109 int ret;
2110 uint64_t count;
2111
2112 if (amdgpu_sriov_multi_vf_mode(adev))
2113 return 0;
2114
2115 if (!smu->pm_enabled)
2116 return 0;
2117
2118 adev->pm.dpm_enabled = false;
2119
2120 ret = smu_smc_hw_cleanup(smu);
2121 if (ret)
2122 return ret;
2123
2124 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2125
2126 smu_set_gfx_cgpg(smu, false);
2127
2128 /*
2129 * pmfw resets the entrycount when the device is suspended, so we save the
2130 * last value here and restore it on resume to keep the count consistent
2131 */
2132 ret = smu_get_entrycount_gfxoff(smu, &count);
2133 if (!ret)
2134 adev->gfx.gfx_off_entrycount = count;
2135
2136 /* clear this on suspend so it will get reprogrammed on resume */
2137 smu->workload_mask = 0;
2138
2139 return 0;
2140 }
2141
2142 static int smu_resume(struct amdgpu_ip_block *ip_block)
2143 {
2144 int ret;
2145 struct amdgpu_device *adev = ip_block->adev;
2146 struct smu_context *smu = adev->powerplay.pp_handle;
2147
2148 if (amdgpu_sriov_multi_vf_mode(adev))
2149 return 0;
2150
2151 if (!smu->pm_enabled)
2152 return 0;
2153
2154 dev_info(adev->dev, "SMU is resuming...\n");
2155
2156 ret = smu_start_smc_engine(smu);
2157 if (ret) {
2158 dev_err(adev->dev, "SMC engine is not correctly up!\n");
2159 return ret;
2160 }
2161
2162 ret = smu_smc_hw_setup(smu);
2163 if (ret) {
2164 dev_err(adev->dev, "Failed to setup smc hw!\n");
2165 return ret;
2166 }
2167
2168 ret = smu_set_gfx_imu_enable(smu);
2169 if (ret)
2170 return ret;
2171
2172 smu_set_gfx_cgpg(smu, true);
2173
2174 smu->disable_uclk_switch = 0;
2175
2176 adev->pm.dpm_enabled = true;
2177
2178 dev_info(adev->dev, "SMU is resumed successfully!\n");
2179
2180 return 0;
2181 }
2182
2183 static int smu_display_configuration_change(void *handle,
2184 const struct amd_pp_display_configuration *display_config)
2185 {
2186 struct smu_context *smu = handle;
2187
2188 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2189 return -EOPNOTSUPP;
2190
2191 if (!display_config)
2192 return -EINVAL;
2193
2194 smu_set_min_dcef_deep_sleep(smu,
2195 display_config->min_dcef_deep_sleep_set_clk / 100);
2196
2197 return 0;
2198 }
2199
2200 static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2201 enum amd_clockgating_state state)
2202 {
2203 return 0;
2204 }
2205
2206 static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
2207 enum amd_powergating_state state)
2208 {
2209 return 0;
2210 }
2211
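/*
 * Descriptive summary (added note): entering a profiling (UMD pstate) level
 * saves the current level and disables GPO, GFX ULV and deep sleep before
 * requesting a stable pstate; exiting re-enables them in reverse order and,
 * for PROFILE_EXIT, restores the previously saved level.
 */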
2212 static int smu_enable_umd_pstate(void *handle,
2213 enum amd_dpm_forced_level *level)
2214 {
2215 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2216 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2217 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2218 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2219
2220 struct smu_context *smu = (struct smu_context*)(handle);
2221 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2222
2223 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2224 return -EINVAL;
2225
2226 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2227 /* enter umd pstate, save current level, disable gfx cg*/
2228 if (*level & profile_mode_mask) {
2229 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2230 smu_gpo_control(smu, false);
2231 smu_gfx_ulv_control(smu, false);
2232 smu_deep_sleep_control(smu, false);
2233 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2234 }
2235 } else {
2236 /* exit umd pstate, restore level, enable gfx cg*/
2237 if (!(*level & profile_mode_mask)) {
2238 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2239 *level = smu_dpm_ctx->saved_dpm_level;
2240 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2241 smu_deep_sleep_control(smu, true);
2242 smu_gfx_ulv_control(smu, true);
2243 smu_gpo_control(smu, true);
2244 }
2245 }
2246
2247 return 0;
2248 }
2249
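/*
 * Descriptive note: rebuild the aggregate workload mask from the per-profile
 * refcounts and only push it to PMFW when it differs from the cached
 * smu->workload_mask, so repeated requests for the same set of profiles are
 * no-ops.
 */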
2250 static int smu_bump_power_profile_mode(struct smu_context *smu,
2251 long *custom_params,
2252 u32 custom_params_max_idx)
2253 {
2254 u32 workload_mask = 0;
2255 int i, ret = 0;
2256
2257 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2258 if (smu->workload_refcount[i])
2259 workload_mask |= 1 << i;
2260 }
2261
2262 if (smu->workload_mask == workload_mask)
2263 return 0;
2264
2265 if (smu->ppt_funcs->set_power_profile_mode)
2266 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2267 custom_params,
2268 custom_params_max_idx);
2269
2270 if (!ret)
2271 smu->workload_mask = workload_mask;
2272
2273 return ret;
2274 }
2275
2276 static void smu_power_profile_mode_get(struct smu_context *smu,
2277 enum PP_SMC_POWER_PROFILE profile_mode)
2278 {
2279 smu->workload_refcount[profile_mode]++;
2280 }
2281
2282 static void smu_power_profile_mode_put(struct smu_context *smu,
2283 enum PP_SMC_POWER_PROFILE profile_mode)
2284 {
2285 if (smu->workload_refcount[profile_mode])
2286 smu->workload_refcount[profile_mode]--;
2287 }
2288
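/*
 * Descriptive summary (added note): unless display settings are skipped,
 * propagate the display config first, then apply the clock adjust rules,
 * notify the SMC of the display config, switch the performance level if it
 * changed, and finally re-evaluate the power profile mode outside of
 * manual/deterministic modes.
 */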
2289 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2290 enum amd_dpm_forced_level level,
2291 bool skip_display_settings)
2292 {
2293 int ret = 0;
2294 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2295
2296 if (!skip_display_settings) {
2297 ret = smu_display_config_changed(smu);
2298 if (ret) {
2299 dev_err(smu->adev->dev, "Failed to change display config!");
2300 return ret;
2301 }
2302 }
2303
2304 ret = smu_apply_clocks_adjust_rules(smu);
2305 if (ret) {
2306 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2307 return ret;
2308 }
2309
2310 if (!skip_display_settings) {
2311 ret = smu_notify_smc_display_config(smu);
2312 if (ret) {
2313 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2314 return ret;
2315 }
2316 }
2317
2318 if (smu_dpm_ctx->dpm_level != level) {
2319 ret = smu_asic_set_performance_level(smu, level);
2320 if (ret) {
2321 if (ret == -EOPNOTSUPP)
2322 dev_info(smu->adev->dev, "set performance level %d not supported",
2323 level);
2324 else
2325 dev_err(smu->adev->dev, "Failed to set performance level %d",
2326 level);
2327 return ret;
2328 }
2329
2330 /* update the saved copy */
2331 smu_dpm_ctx->dpm_level = level;
2332 }
2333
2334 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2335 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2336 smu_bump_power_profile_mode(smu, NULL, 0);
2337
2338 return ret;
2339 }
2340
2341 static int smu_handle_task(struct smu_context *smu,
2342 enum amd_dpm_forced_level level,
2343 enum amd_pp_task task_id)
2344 {
2345 int ret = 0;
2346
2347 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2348 return -EOPNOTSUPP;
2349
2350 switch (task_id) {
2351 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2352 ret = smu_pre_display_config_changed(smu);
2353 if (ret)
2354 return ret;
2355 ret = smu_adjust_power_state_dynamic(smu, level, false);
2356 break;
2357 case AMD_PP_TASK_COMPLETE_INIT:
2358 ret = smu_adjust_power_state_dynamic(smu, level, true);
2359 break;
2360 case AMD_PP_TASK_READJUST_POWER_STATE:
2361 ret = smu_adjust_power_state_dynamic(smu, level, true);
2362 break;
2363 default:
2364 break;
2365 }
2366
2367 return ret;
2368 }
2369
2370 static int smu_handle_dpm_task(void *handle,
2371 enum amd_pp_task task_id,
2372 enum amd_pm_state_type *user_state)
2373 {
2374 struct smu_context *smu = handle;
2375 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2376
2377 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2378
2379 }
2380
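/*
 * Descriptive note: take or drop a refcount on the requested profile and
 * re-evaluate the workload mask; if pushing the new mask fails, the refcount
 * change is rolled back so the bookkeeping stays consistent. Nothing is sent
 * to PMFW while the workload is paused.
 */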
2381 static int smu_switch_power_profile(void *handle,
2382 enum PP_SMC_POWER_PROFILE type,
2383 bool enable)
2384 {
2385 struct smu_context *smu = handle;
2386 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2387 int ret;
2388
2389 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2390 return -EOPNOTSUPP;
2391
2392 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
2393 return -EINVAL;
2394
2395 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2396 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2397 if (enable)
2398 smu_power_profile_mode_get(smu, type);
2399 else
2400 smu_power_profile_mode_put(smu, type);
2401 /* don't switch the active workload when paused */
2402 if (smu->pause_workload)
2403 ret = 0;
2404 else
2405 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2406 if (ret) {
2407 if (enable)
2408 smu_power_profile_mode_put(smu, type);
2409 else
2410 smu_power_profile_mode_get(smu, type);
2411 return ret;
2412 }
2413 }
2414
2415 return 0;
2416 }
2417
2418 static int smu_pause_power_profile(void *handle,
2419 bool pause)
2420 {
2421 struct smu_context *smu = handle;
2422 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2423 u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
2424 int ret;
2425
2426 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2427 return -EOPNOTSUPP;
2428
2429 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2430 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2431 smu->pause_workload = pause;
2432
2433 /* force to bootup default profile */
2434 if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
2435 ret = smu->ppt_funcs->set_power_profile_mode(smu,
2436 workload_mask,
2437 NULL,
2438 0);
2439 else
2440 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2441 return ret;
2442 }
2443
2444 return 0;
2445 }
2446
2447 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2448 {
2449 struct smu_context *smu = handle;
2450 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2451
2452 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2453 return -EOPNOTSUPP;
2454
2455 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2456 return -EINVAL;
2457
2458 return smu_dpm_ctx->dpm_level;
2459 }
2460
2461 static int smu_force_performance_level(void *handle,
2462 enum amd_dpm_forced_level level)
2463 {
2464 struct smu_context *smu = handle;
2465 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2466 int ret = 0;
2467
2468 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2469 return -EOPNOTSUPP;
2470
2471 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2472 return -EINVAL;
2473
2474 ret = smu_enable_umd_pstate(smu, &level);
2475 if (ret)
2476 return ret;
2477
2478 ret = smu_handle_task(smu, level,
2479 AMD_PP_TASK_READJUST_POWER_STATE);
2480
2481 /* reset user dpm clock state */
2482 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2483 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2484 smu->user_dpm_profile.clk_dependency = 0;
2485 }
2486
2487 return ret;
2488 }
2489
2490 static int smu_set_display_count(void *handle, uint32_t count)
2491 {
2492 struct smu_context *smu = handle;
2493
2494 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2495 return -EOPNOTSUPP;
2496
2497 return smu_init_display_count(smu, count);
2498 }
2499
2500 static int smu_force_smuclk_levels(struct smu_context *smu,
2501 enum smu_clk_type clk_type,
2502 uint32_t mask)
2503 {
2504 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2505 int ret = 0;
2506
2507 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2508 return -EOPNOTSUPP;
2509
2510 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2511 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2512 return -EINVAL;
2513 }
2514
2515 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2516 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2517 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2518 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2519 smu_set_user_clk_dependencies(smu, clk_type);
2520 }
2521 }
2522
2523 return ret;
2524 }
2525
2526 static int smu_force_ppclk_levels(void *handle,
2527 enum pp_clock_type type,
2528 uint32_t mask)
2529 {
2530 struct smu_context *smu = handle;
2531 enum smu_clk_type clk_type;
2532
2533 switch (type) {
2534 case PP_SCLK:
2535 clk_type = SMU_SCLK; break;
2536 case PP_MCLK:
2537 clk_type = SMU_MCLK; break;
2538 case PP_PCIE:
2539 clk_type = SMU_PCIE; break;
2540 case PP_SOCCLK:
2541 clk_type = SMU_SOCCLK; break;
2542 case PP_FCLK:
2543 clk_type = SMU_FCLK; break;
2544 case PP_DCEFCLK:
2545 clk_type = SMU_DCEFCLK; break;
2546 case PP_VCLK:
2547 clk_type = SMU_VCLK; break;
2548 case PP_VCLK1:
2549 clk_type = SMU_VCLK1; break;
2550 case PP_DCLK:
2551 clk_type = SMU_DCLK; break;
2552 case PP_DCLK1:
2553 clk_type = SMU_DCLK1; break;
2554 case OD_SCLK:
2555 clk_type = SMU_OD_SCLK; break;
2556 case OD_MCLK:
2557 clk_type = SMU_OD_MCLK; break;
2558 case OD_VDDC_CURVE:
2559 clk_type = SMU_OD_VDDC_CURVE; break;
2560 case OD_RANGE:
2561 clk_type = SMU_OD_RANGE; break;
2562 default:
2563 return -EINVAL;
2564 }
2565
2566 return smu_force_smuclk_levels(smu, clk_type, mask);
2567 }
2568
2569 /*
2570 * On system suspend or reset, the dpm_enabled
2571 * flag is cleared so that the SMU services which
2572 * are not supported in those states are gated.
2573 * However, setting the mp1 state must still be allowed
2574 * even when dpm_enabled is cleared.
2575 */
2576 static int smu_set_mp1_state(void *handle,
2577 enum pp_mp1_state mp1_state)
2578 {
2579 struct smu_context *smu = handle;
2580 int ret = 0;
2581
2582 if (!smu->pm_enabled)
2583 return -EOPNOTSUPP;
2584
2585 if (smu->ppt_funcs &&
2586 smu->ppt_funcs->set_mp1_state)
2587 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2588
2589 return ret;
2590 }
2591
2592 static int smu_set_df_cstate(void *handle,
2593 enum pp_df_cstate state)
2594 {
2595 struct smu_context *smu = handle;
2596 int ret = 0;
2597
2598 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2599 return -EOPNOTSUPP;
2600
2601 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2602 return 0;
2603
2604 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2605 if (ret)
2606 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2607
2608 return ret;
2609 }
2610
2611 int smu_write_watermarks_table(struct smu_context *smu)
2612 {
2613 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2614 return -EOPNOTSUPP;
2615
2616 return smu_set_watermarks_table(smu, NULL);
2617 }
2618
2619 static int smu_set_watermarks_for_clock_ranges(void *handle,
2620 struct pp_smu_wm_range_sets *clock_ranges)
2621 {
2622 struct smu_context *smu = handle;
2623
2624 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2625 return -EOPNOTSUPP;
2626
2627 if (smu->disable_watermark)
2628 return 0;
2629
2630 return smu_set_watermarks_table(smu, clock_ranges);
2631 }
2632
2633 int smu_set_ac_dc(struct smu_context *smu)
2634 {
2635 int ret = 0;
2636
2637 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2638 return -EOPNOTSUPP;
2639
2640 /* controlled by firmware */
2641 if (smu->dc_controlled_by_gpio)
2642 return 0;
2643
2644 ret = smu_set_power_source(smu,
2645 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2646 SMU_POWER_SOURCE_DC);
2647 if (ret)
2648 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2649 smu->adev->pm.ac_power ? "AC" : "DC");
2650
2651 return ret;
2652 }
2653
2654 const struct amd_ip_funcs smu_ip_funcs = {
2655 .name = "smu",
2656 .early_init = smu_early_init,
2657 .late_init = smu_late_init,
2658 .sw_init = smu_sw_init,
2659 .sw_fini = smu_sw_fini,
2660 .hw_init = smu_hw_init,
2661 .hw_fini = smu_hw_fini,
2662 .late_fini = smu_late_fini,
2663 .suspend = smu_suspend,
2664 .resume = smu_resume,
2665 .is_idle = NULL,
2666 .check_soft_reset = NULL,
2667 .wait_for_idle = NULL,
2668 .soft_reset = NULL,
2669 .set_clockgating_state = smu_set_clockgating_state,
2670 .set_powergating_state = smu_set_powergating_state,
2671 };
2672
2673 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2674 .type = AMD_IP_BLOCK_TYPE_SMC,
2675 .major = 11,
2676 .minor = 0,
2677 .rev = 0,
2678 .funcs = &smu_ip_funcs,
2679 };
2680
2681 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2682 .type = AMD_IP_BLOCK_TYPE_SMC,
2683 .major = 12,
2684 .minor = 0,
2685 .rev = 0,
2686 .funcs = &smu_ip_funcs,
2687 };
2688
2689 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2690 .type = AMD_IP_BLOCK_TYPE_SMC,
2691 .major = 13,
2692 .minor = 0,
2693 .rev = 0,
2694 .funcs = &smu_ip_funcs,
2695 };
2696
2697 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2698 .type = AMD_IP_BLOCK_TYPE_SMC,
2699 .major = 14,
2700 .minor = 0,
2701 .rev = 0,
2702 .funcs = &smu_ip_funcs,
2703 };
2704
2705 static int smu_load_microcode(void *handle)
2706 {
2707 struct smu_context *smu = handle;
2708 struct amdgpu_device *adev = smu->adev;
2709 int ret = 0;
2710
2711 if (!smu->pm_enabled)
2712 return -EOPNOTSUPP;
2713
2714 /* This should be used only for non-PSP firmware loading */
2715 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2716 return 0;
2717
2718 if (smu->ppt_funcs->load_microcode) {
2719 ret = smu->ppt_funcs->load_microcode(smu);
2720 if (ret) {
2721 dev_err(adev->dev, "Load microcode failed\n");
2722 return ret;
2723 }
2724 }
2725
2726 if (smu->ppt_funcs->check_fw_status) {
2727 ret = smu->ppt_funcs->check_fw_status(smu);
2728 if (ret) {
2729 dev_err(adev->dev, "SMC is not ready\n");
2730 return ret;
2731 }
2732 }
2733
2734 return ret;
2735 }
2736
2737 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2738 {
2739 int ret = 0;
2740
2741 if (smu->ppt_funcs->set_gfx_cgpg)
2742 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2743
2744 return ret;
2745 }
2746
2747 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2748 {
2749 struct smu_context *smu = handle;
2750 int ret = 0;
2751
2752 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2753 return -EOPNOTSUPP;
2754
2755 if (!smu->ppt_funcs->set_fan_speed_rpm)
2756 return -EOPNOTSUPP;
2757
2758 if (speed == U32_MAX)
2759 return -EINVAL;
2760
2761 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2762 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2763 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2764 smu->user_dpm_profile.fan_speed_rpm = speed;
2765
2766 /* Override custom PWM setting as they cannot co-exist */
2767 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2768 smu->user_dpm_profile.fan_speed_pwm = 0;
2769 }
2770
2771 return ret;
2772 }
2773
2774 /**
2775 * smu_get_power_limit - Request one of the SMU Power Limits
2776 *
2777 * @handle: pointer to smu context
2778 * @limit: requested limit is written back to this variable
2779 * @pp_limit_level: &pp_power_limit_level selecting which power limit level to return
2780 * @pp_power_type: &pp_power_type selecting the type of power limit
2781 * Return: 0 on success, <0 on error
2782 *
2783 */
2784 int smu_get_power_limit(void *handle,
2785 uint32_t *limit,
2786 enum pp_power_limit_level pp_limit_level,
2787 enum pp_power_type pp_power_type)
2788 {
2789 struct smu_context *smu = handle;
2790 struct amdgpu_device *adev = smu->adev;
2791 enum smu_ppt_limit_level limit_level;
2792 uint32_t limit_type;
2793 int ret = 0;
2794
2795 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2796 return -EOPNOTSUPP;
2797
2798 switch (pp_power_type) {
2799 case PP_PWR_TYPE_SUSTAINED:
2800 limit_type = SMU_DEFAULT_PPT_LIMIT;
2801 break;
2802 case PP_PWR_TYPE_FAST:
2803 limit_type = SMU_FAST_PPT_LIMIT;
2804 break;
2805 default:
2806 return -EOPNOTSUPP;
2807 }
2808
2809 switch (pp_limit_level) {
2810 case PP_PWR_LIMIT_CURRENT:
2811 limit_level = SMU_PPT_LIMIT_CURRENT;
2812 break;
2813 case PP_PWR_LIMIT_DEFAULT:
2814 limit_level = SMU_PPT_LIMIT_DEFAULT;
2815 break;
2816 case PP_PWR_LIMIT_MAX:
2817 limit_level = SMU_PPT_LIMIT_MAX;
2818 break;
2819 case PP_PWR_LIMIT_MIN:
2820 limit_level = SMU_PPT_LIMIT_MIN;
2821 break;
2822 default:
2823 return -EOPNOTSUPP;
2824 }
2825
2826 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2827 if (smu->ppt_funcs->get_ppt_limit)
2828 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2829 } else {
2830 switch (limit_level) {
2831 case SMU_PPT_LIMIT_CURRENT:
2832 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2833 case IP_VERSION(13, 0, 2):
2834 case IP_VERSION(13, 0, 6):
2835 case IP_VERSION(13, 0, 12):
2836 case IP_VERSION(13, 0, 14):
2837 case IP_VERSION(11, 0, 7):
2838 case IP_VERSION(11, 0, 11):
2839 case IP_VERSION(11, 0, 12):
2840 case IP_VERSION(11, 0, 13):
2841 ret = smu_get_asic_power_limits(smu,
2842 &smu->current_power_limit,
2843 NULL, NULL, NULL);
2844 break;
2845 default:
2846 break;
2847 }
2848 *limit = smu->current_power_limit;
2849 break;
2850 case SMU_PPT_LIMIT_DEFAULT:
2851 *limit = smu->default_power_limit;
2852 break;
2853 case SMU_PPT_LIMIT_MAX:
2854 *limit = smu->max_power_limit;
2855 break;
2856 case SMU_PPT_LIMIT_MIN:
2857 *limit = smu->min_power_limit;
2858 break;
2859 default:
2860 return -EINVAL;
2861 }
2862 }
2863
2864 return ret;
2865 }
2866
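/*
 * Added note on the packed @limit parameter, inferred from the decode below:
 * bits 31:24 carry the PPT limit type and bits 23:0 carry the limit value,
 * so a hypothetical caller asking for a fast PPT limit of 220 (in the
 * driver's power-limit units) would pass (SMU_FAST_PPT_LIMIT << 24) | 220.
 */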
2867 static int smu_set_power_limit(void *handle, uint32_t limit)
2868 {
2869 struct smu_context *smu = handle;
2870 uint32_t limit_type = limit >> 24;
2871 int ret = 0;
2872
2873 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2874 return -EOPNOTSUPP;
2875
2876 limit &= (1<<24)-1;
2877 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2878 if (smu->ppt_funcs->set_power_limit)
2879 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2880
2881 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2882 dev_err(smu->adev->dev,
2883 "New power limit (%d) is out of range [%d,%d]\n",
2884 limit, smu->min_power_limit, smu->max_power_limit);
2885 return -EINVAL;
2886 }
2887
2888 if (!limit)
2889 limit = smu->current_power_limit;
2890
2891 if (smu->ppt_funcs->set_power_limit) {
2892 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2893 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2894 smu->user_dpm_profile.power_limit = limit;
2895 }
2896
2897 return ret;
2898 }
2899
2900 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2901 {
2902 int ret = 0;
2903
2904 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2905 return -EOPNOTSUPP;
2906
2907 if (smu->ppt_funcs->print_clk_levels)
2908 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2909
2910 return ret;
2911 }
2912
2913 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2914 {
2915 enum smu_clk_type clk_type;
2916
2917 switch (type) {
2918 case PP_SCLK:
2919 clk_type = SMU_SCLK; break;
2920 case PP_MCLK:
2921 clk_type = SMU_MCLK; break;
2922 case PP_PCIE:
2923 clk_type = SMU_PCIE; break;
2924 case PP_SOCCLK:
2925 clk_type = SMU_SOCCLK; break;
2926 case PP_FCLK:
2927 clk_type = SMU_FCLK; break;
2928 case PP_DCEFCLK:
2929 clk_type = SMU_DCEFCLK; break;
2930 case PP_VCLK:
2931 clk_type = SMU_VCLK; break;
2932 case PP_VCLK1:
2933 clk_type = SMU_VCLK1; break;
2934 case PP_DCLK:
2935 clk_type = SMU_DCLK; break;
2936 case PP_DCLK1:
2937 clk_type = SMU_DCLK1; break;
2938 case OD_SCLK:
2939 clk_type = SMU_OD_SCLK; break;
2940 case OD_MCLK:
2941 clk_type = SMU_OD_MCLK; break;
2942 case OD_VDDC_CURVE:
2943 clk_type = SMU_OD_VDDC_CURVE; break;
2944 case OD_RANGE:
2945 clk_type = SMU_OD_RANGE; break;
2946 case OD_VDDGFX_OFFSET:
2947 clk_type = SMU_OD_VDDGFX_OFFSET; break;
2948 case OD_CCLK:
2949 clk_type = SMU_OD_CCLK; break;
2950 case OD_FAN_CURVE:
2951 clk_type = SMU_OD_FAN_CURVE; break;
2952 case OD_ACOUSTIC_LIMIT:
2953 clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2954 case OD_ACOUSTIC_TARGET:
2955 clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2956 case OD_FAN_TARGET_TEMPERATURE:
2957 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2958 case OD_FAN_MINIMUM_PWM:
2959 clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2960 case OD_FAN_ZERO_RPM_ENABLE:
2961 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
2962 case OD_FAN_ZERO_RPM_STOP_TEMP:
2963 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
2964 default:
2965 clk_type = SMU_CLK_COUNT; break;
2966 }
2967
2968 return clk_type;
2969 }
2970
2971 static int smu_print_ppclk_levels(void *handle,
2972 enum pp_clock_type type,
2973 char *buf)
2974 {
2975 struct smu_context *smu = handle;
2976 enum smu_clk_type clk_type;
2977
2978 clk_type = smu_convert_to_smuclk(type);
2979 if (clk_type == SMU_CLK_COUNT)
2980 return -EINVAL;
2981
2982 return smu_print_smuclk_levels(smu, clk_type, buf);
2983 }
2984
2985 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2986 {
2987 struct smu_context *smu = handle;
2988 enum smu_clk_type clk_type;
2989
2990 clk_type = smu_convert_to_smuclk(type);
2991 if (clk_type == SMU_CLK_COUNT)
2992 return -EINVAL;
2993
2994 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2995 return -EOPNOTSUPP;
2996
2997 if (!smu->ppt_funcs->emit_clk_levels)
2998 return -ENOENT;
2999
3000 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
3001
3002 }
3003
3004 static int smu_od_edit_dpm_table(void *handle,
3005 enum PP_OD_DPM_TABLE_COMMAND type,
3006 long *input, uint32_t size)
3007 {
3008 struct smu_context *smu = handle;
3009 int ret = 0;
3010
3011 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3012 return -EOPNOTSUPP;
3013
3014 if (smu->ppt_funcs->od_edit_dpm_table) {
3015 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
3016 }
3017
3018 return ret;
3019 }
3020
3021 static int smu_read_sensor(void *handle,
3022 int sensor,
3023 void *data,
3024 int *size_arg)
3025 {
3026 struct smu_context *smu = handle;
3027 struct amdgpu_device *adev = smu->adev;
3028 struct smu_umd_pstate_table *pstate_table =
3029 &smu->pstate_table;
3030 int i, ret = 0;
3031 uint32_t *size, size_val;
3032
3033 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3034 return -EOPNOTSUPP;
3035
3036 if (!data || !size_arg)
3037 return -EINVAL;
3038
3039 size_val = *size_arg;
3040 size = &size_val;
3041
3042 if (smu->ppt_funcs->read_sensor)
3043 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
3044 goto unlock;
3045
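/*
 * Added note: the pstate clocks below are kept in MHz in the pstate table;
 * the "* 100" scaling converts them to the 10 kHz units the sensor
 * interface is assumed to report elsewhere in the driver.
 */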
3046 switch (sensor) {
3047 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3048 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3049 *size = 4;
3050 break;
3051 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3052 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3053 *size = 4;
3054 break;
3055 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3056 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3057 *size = 4;
3058 break;
3059 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3060 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3061 *size = 4;
3062 break;
3063 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3064 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3065 *size = 8;
3066 break;
3067 case AMDGPU_PP_SENSOR_UVD_POWER:
3068 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3069 *size = 4;
3070 break;
3071 case AMDGPU_PP_SENSOR_VCE_POWER:
3072 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3073 *size = 4;
3074 break;
3075 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3076 *(uint32_t *)data = 0;
3077 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
3078 if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
3079 *(uint32_t *)data = 1;
3080 break;
3081 }
3082 }
3083 *size = 4;
3084 break;
3085 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3086 *(uint32_t *)data = 0;
3087 *size = 4;
3088 break;
3089 default:
3090 *size = 0;
3091 ret = -EOPNOTSUPP;
3092 break;
3093 }
3094
3095 unlock:
3096 // assign uint32_t to int
3097 *size_arg = size_val;
3098
3099 return ret;
3100 }
3101
3102 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
3103 {
3104 int ret = -EOPNOTSUPP;
3105 struct smu_context *smu = handle;
3106
3107 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3108 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3109
3110 return ret;
3111 }
3112
3113 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3114 {
3115 int ret = -EOPNOTSUPP;
3116 struct smu_context *smu = handle;
3117
3118 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3119 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3120
3121 return ret;
3122 }
3123
3124 static int smu_get_power_profile_mode(void *handle, char *buf)
3125 {
3126 struct smu_context *smu = handle;
3127
3128 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3129 !smu->ppt_funcs->get_power_profile_mode)
3130 return -EOPNOTSUPP;
3131 if (!buf)
3132 return -EINVAL;
3133
3134 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3135 }
3136
3137 static int smu_set_power_profile_mode(void *handle,
3138 long *param,
3139 uint32_t param_size)
3140 {
3141 struct smu_context *smu = handle;
3142 bool custom = false;
3143 int ret = 0;
3144
3145 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3146 !smu->ppt_funcs->set_power_profile_mode)
3147 return -EOPNOTSUPP;
3148
3149 if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
3150 custom = true;
3151 /* clear frontend mask so custom changes propagate */
3152 smu->workload_mask = 0;
3153 }
3154
3155 if ((param[param_size] != smu->power_profile_mode) || custom) {
3156 /* clear the old user preference */
3157 smu_power_profile_mode_put(smu, smu->power_profile_mode);
3158 /* set the new user preference */
3159 smu_power_profile_mode_get(smu, param[param_size]);
3160 ret = smu_bump_power_profile_mode(smu,
3161 custom ? param : NULL,
3162 custom ? param_size : 0);
3163 if (ret)
3164 smu_power_profile_mode_put(smu, param[param_size]);
3165 else
3166 /* store the user's preference */
3167 smu->power_profile_mode = param[param_size];
3168 }
3169
3170 return ret;
3171 }
3172
3173 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3174 {
3175 struct smu_context *smu = handle;
3176
3177 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3178 return -EOPNOTSUPP;
3179
3180 if (!smu->ppt_funcs->get_fan_control_mode)
3181 return -EOPNOTSUPP;
3182
3183 if (!fan_mode)
3184 return -EINVAL;
3185
3186 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3187
3188 return 0;
3189 }
3190
3191 static int smu_set_fan_control_mode(void *handle, u32 value)
3192 {
3193 struct smu_context *smu = handle;
3194 int ret = 0;
3195
3196 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3197 return -EOPNOTSUPP;
3198
3199 if (!smu->ppt_funcs->set_fan_control_mode)
3200 return -EOPNOTSUPP;
3201
3202 if (value == U32_MAX)
3203 return -EINVAL;
3204
3205 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3206 if (ret)
3207 goto out;
3208
3209 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3210 smu->user_dpm_profile.fan_mode = value;
3211
3212 /* reset user dpm fan speed */
3213 if (value != AMD_FAN_CTRL_MANUAL) {
3214 smu->user_dpm_profile.fan_speed_pwm = 0;
3215 smu->user_dpm_profile.fan_speed_rpm = 0;
3216 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3217 }
3218 }
3219
3220 out:
3221 return ret;
3222 }
3223
3224 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3225 {
3226 struct smu_context *smu = handle;
3227 int ret = 0;
3228
3229 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3230 return -EOPNOTSUPP;
3231
3232 if (!smu->ppt_funcs->get_fan_speed_pwm)
3233 return -EOPNOTSUPP;
3234
3235 if (!speed)
3236 return -EINVAL;
3237
3238 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3239
3240 return ret;
3241 }
3242
3243 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3244 {
3245 struct smu_context *smu = handle;
3246 int ret = 0;
3247
3248 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3249 return -EOPNOTSUPP;
3250
3251 if (!smu->ppt_funcs->set_fan_speed_pwm)
3252 return -EOPNOTSUPP;
3253
3254 if (speed == U32_MAX)
3255 return -EINVAL;
3256
3257 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3258 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3259 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3260 smu->user_dpm_profile.fan_speed_pwm = speed;
3261
3262 /* Override custom RPM setting as they cannot co-exist */
3263 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3264 smu->user_dpm_profile.fan_speed_rpm = 0;
3265 }
3266
3267 return ret;
3268 }
3269
3270 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3271 {
3272 struct smu_context *smu = handle;
3273 int ret = 0;
3274
3275 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3276 return -EOPNOTSUPP;
3277
3278 if (!smu->ppt_funcs->get_fan_speed_rpm)
3279 return -EOPNOTSUPP;
3280
3281 if (!speed)
3282 return -EINVAL;
3283
3284 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3285
3286 return ret;
3287 }
3288
3289 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3290 {
3291 struct smu_context *smu = handle;
3292
3293 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3294 return -EOPNOTSUPP;
3295
3296 return smu_set_min_dcef_deep_sleep(smu, clk);
3297 }
3298
3299 static int smu_get_clock_by_type_with_latency(void *handle,
3300 enum amd_pp_clock_type type,
3301 struct pp_clock_levels_with_latency *clocks)
3302 {
3303 struct smu_context *smu = handle;
3304 enum smu_clk_type clk_type;
3305 int ret = 0;
3306
3307 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3308 return -EOPNOTSUPP;
3309
3310 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3311 switch (type) {
3312 case amd_pp_sys_clock:
3313 clk_type = SMU_GFXCLK;
3314 break;
3315 case amd_pp_mem_clock:
3316 clk_type = SMU_MCLK;
3317 break;
3318 case amd_pp_dcef_clock:
3319 clk_type = SMU_DCEFCLK;
3320 break;
3321 case amd_pp_disp_clock:
3322 clk_type = SMU_DISPCLK;
3323 break;
3324 default:
3325 dev_err(smu->adev->dev, "Invalid clock type!\n");
3326 return -EINVAL;
3327 }
3328
3329 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3330 }
3331
3332 return ret;
3333 }
3334
3335 static int smu_display_clock_voltage_request(void *handle,
3336 struct pp_display_clock_request *clock_req)
3337 {
3338 struct smu_context *smu = handle;
3339 int ret = 0;
3340
3341 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3342 return -EOPNOTSUPP;
3343
3344 if (smu->ppt_funcs->display_clock_voltage_request)
3345 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3346
3347 return ret;
3348 }
3349
3350
3351 static int smu_display_disable_memory_clock_switch(void *handle,
3352 bool disable_memory_clock_switch)
3353 {
3354 struct smu_context *smu = handle;
3355 int ret = -EINVAL;
3356
3357 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3358 return -EOPNOTSUPP;
3359
3360 if (smu->ppt_funcs->display_disable_memory_clock_switch)
3361 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3362
3363 return ret;
3364 }
3365
3366 static int smu_set_xgmi_pstate(void *handle,
3367 uint32_t pstate)
3368 {
3369 struct smu_context *smu = handle;
3370 int ret = 0;
3371
3372 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3373 return -EOPNOTSUPP;
3374
3375 if (smu->ppt_funcs->set_xgmi_pstate)
3376 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3377
3378 if (ret)
3379 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3380
3381 return ret;
3382 }
3383
3384 static int smu_get_baco_capability(void *handle)
3385 {
3386 struct smu_context *smu = handle;
3387
3388 if (!smu->pm_enabled)
3389 return false;
3390
3391 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3392 return false;
3393
3394 return smu->ppt_funcs->get_bamaco_support(smu);
3395 }
3396
3397 static int smu_baco_set_state(void *handle, int state)
3398 {
3399 struct smu_context *smu = handle;
3400 int ret = 0;
3401
3402 if (!smu->pm_enabled)
3403 return -EOPNOTSUPP;
3404
3405 if (state == 0) {
3406 if (smu->ppt_funcs->baco_exit)
3407 ret = smu->ppt_funcs->baco_exit(smu);
3408 } else if (state == 1) {
3409 if (smu->ppt_funcs->baco_enter)
3410 ret = smu->ppt_funcs->baco_enter(smu);
3411 } else {
3412 return -EINVAL;
3413 }
3414
3415 if (ret)
3416 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3417 (state)?"enter":"exit");
3418
3419 return ret;
3420 }
3421
3422 bool smu_mode1_reset_is_support(struct smu_context *smu)
3423 {
3424 bool ret = false;
3425
3426 if (!smu->pm_enabled)
3427 return false;
3428
3429 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3430 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3431
3432 return ret;
3433 }
3434
3435 bool smu_link_reset_is_support(struct smu_context *smu)
3436 {
3437 bool ret = false;
3438
3439 if (!smu->pm_enabled)
3440 return false;
3441
3442 if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
3443 ret = smu->ppt_funcs->link_reset_is_support(smu);
3444
3445 return ret;
3446 }
3447
3448 int smu_mode1_reset(struct smu_context *smu)
3449 {
3450 int ret = 0;
3451
3452 if (!smu->pm_enabled)
3453 return -EOPNOTSUPP;
3454
3455 if (smu->ppt_funcs->mode1_reset)
3456 ret = smu->ppt_funcs->mode1_reset(smu);
3457
3458 return ret;
3459 }
3460
3461 static int smu_mode2_reset(void *handle)
3462 {
3463 struct smu_context *smu = handle;
3464 int ret = 0;
3465
3466 if (!smu->pm_enabled)
3467 return -EOPNOTSUPP;
3468
3469 if (smu->ppt_funcs->mode2_reset)
3470 ret = smu->ppt_funcs->mode2_reset(smu);
3471
3472 if (ret)
3473 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3474
3475 return ret;
3476 }
3477
3478 int smu_link_reset(struct smu_context *smu)
3479 {
3480 int ret = 0;
3481
3482 if (!smu->pm_enabled)
3483 return -EOPNOTSUPP;
3484
3485 if (smu->ppt_funcs->link_reset)
3486 ret = smu->ppt_funcs->link_reset(smu);
3487
3488 return ret;
3489 }
3490
3491 static int smu_enable_gfx_features(void *handle)
3492 {
3493 struct smu_context *smu = handle;
3494 int ret = 0;
3495
3496 if (!smu->pm_enabled)
3497 return -EOPNOTSUPP;
3498
3499 if (smu->ppt_funcs->enable_gfx_features)
3500 ret = smu->ppt_funcs->enable_gfx_features(smu);
3501
3502 if (ret)
3503 dev_err(smu->adev->dev, "enable gfx features failed!\n");
3504
3505 return ret;
3506 }
3507
3508 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3509 struct pp_smu_nv_clock_table *max_clocks)
3510 {
3511 struct smu_context *smu = handle;
3512 int ret = 0;
3513
3514 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3515 return -EOPNOTSUPP;
3516
3517 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3518 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3519
3520 return ret;
3521 }
3522
3523 static int smu_get_uclk_dpm_states(void *handle,
3524 unsigned int *clock_values_in_khz,
3525 unsigned int *num_states)
3526 {
3527 struct smu_context *smu = handle;
3528 int ret = 0;
3529
3530 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3531 return -EOPNOTSUPP;
3532
3533 if (smu->ppt_funcs->get_uclk_dpm_states)
3534 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3535
3536 return ret;
3537 }
3538
3539 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3540 {
3541 struct smu_context *smu = handle;
3542 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3543
3544 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3545 return -EOPNOTSUPP;
3546
3547 if (smu->ppt_funcs->get_current_power_state)
3548 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3549
3550 return pm_state;
3551 }
3552
3553 static int smu_get_dpm_clock_table(void *handle,
3554 struct dpm_clocks *clock_table)
3555 {
3556 struct smu_context *smu = handle;
3557 int ret = 0;
3558
3559 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3560 return -EOPNOTSUPP;
3561
3562 if (smu->ppt_funcs->get_dpm_clock_table)
3563 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3564
3565 return ret;
3566 }
3567
3568 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3569 {
3570 struct smu_context *smu = handle;
3571
3572 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3573 return -EOPNOTSUPP;
3574
3575 if (!smu->ppt_funcs->get_gpu_metrics)
3576 return -EOPNOTSUPP;
3577
3578 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3579 }
3580
3581 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3582 size_t size)
3583 {
3584 struct smu_context *smu = handle;
3585
3586 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3587 return -EOPNOTSUPP;
3588
3589 if (!smu->ppt_funcs->get_pm_metrics)
3590 return -EOPNOTSUPP;
3591
3592 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3593 }
3594
3595 static int smu_enable_mgpu_fan_boost(void *handle)
3596 {
3597 struct smu_context *smu = handle;
3598 int ret = 0;
3599
3600 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3601 return -EOPNOTSUPP;
3602
3603 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3604 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3605
3606 return ret;
3607 }
3608
3609 static int smu_gfx_state_change_set(void *handle,
3610 uint32_t state)
3611 {
3612 struct smu_context *smu = handle;
3613 int ret = 0;
3614
3615 if (smu->ppt_funcs->gfx_state_change_set)
3616 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3617
3618 return ret;
3619 }
3620
3621 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3622 {
3623 int ret = 0;
3624
3625 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3626 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3627
3628 return ret;
3629 }
3630
3631 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3632 {
3633 int ret = -EOPNOTSUPP;
3634
3635 if (smu->ppt_funcs &&
3636 smu->ppt_funcs->get_ecc_info)
3637 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3638
3639 return ret;
3640
3641 }
3642
3643 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3644 {
3645 struct smu_context *smu = handle;
3646 struct smu_table_context *smu_table = &smu->smu_table;
3647 struct smu_table *memory_pool = &smu_table->memory_pool;
3648
3649 if (!addr || !size)
3650 return -EINVAL;
3651
3652 *addr = NULL;
3653 *size = 0;
3654 if (memory_pool->bo) {
3655 *addr = memory_pool->cpu_addr;
3656 *size = memory_pool->size;
3657 }
3658
3659 return 0;
3660 }
3661
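/*
 * Descriptive note: emits one line per supported policy level in the form
 * "<level> : <description>", with a trailing '*' marking the currently
 * active level, appending to sysbuf at the offset passed in via *size.
 */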
3662 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
3663 size_t *size)
3664 {
3665 size_t offset = *size;
3666 int level;
3667
3668 for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
3669 if (level == policy->current_level)
3670 offset += sysfs_emit_at(sysbuf, offset,
3671 "%d : %s*\n", level,
3672 policy->desc->get_desc(policy, level));
3673 else
3674 offset += sysfs_emit_at(sysbuf, offset,
3675 "%d : %s\n", level,
3676 policy->desc->get_desc(policy, level));
3677 }
3678
3679 *size = offset;
3680 }
3681
3682 ssize_t smu_get_pm_policy_info(struct smu_context *smu,
3683 enum pp_pm_policy p_type, char *sysbuf)
3684 {
3685 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3686 struct smu_dpm_policy_ctxt *policy_ctxt;
3687 struct smu_dpm_policy *dpm_policy;
3688 size_t offset = 0;
3689
3690 policy_ctxt = dpm_ctxt->dpm_policies;
3691 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3692 !policy_ctxt->policy_mask)
3693 return -EOPNOTSUPP;
3694
3695 if (p_type == PP_PM_POLICY_NONE)
3696 return -EINVAL;
3697
3698 dpm_policy = smu_get_pm_policy(smu, p_type);
3699 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
3700 return -ENOENT;
3701
3702 if (!sysbuf)
3703 return -EINVAL;
3704
3705 smu_print_dpm_policy(dpm_policy, sysbuf, &offset);
3706
3707 return offset;
3708 }
3709
3710 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
3711 enum pp_pm_policy p_type)
3712 {
3713 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3714 struct smu_dpm_policy_ctxt *policy_ctxt;
3715 int i;
3716
3717 policy_ctxt = dpm_ctxt->dpm_policies;
3718 if (!policy_ctxt)
3719 return NULL;
3720
3721 for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
3722 if (policy_ctxt->policies[i].policy_type == p_type)
3723 return &policy_ctxt->policies[i];
3724 }
3725
3726 return NULL;
3727 }
3728
3729 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
3730 int level)
3731 {
3732 struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3733 struct smu_dpm_policy *dpm_policy = NULL;
3734 struct smu_dpm_policy_ctxt *policy_ctxt;
3735 int ret = -EOPNOTSUPP;
3736
3737 policy_ctxt = dpm_ctxt->dpm_policies;
3738 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3739 !policy_ctxt->policy_mask)
3740 return ret;
3741
3742 if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
3743 return -EINVAL;
3744
3745 dpm_policy = smu_get_pm_policy(smu, p_type);
3746
3747 if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
3748 return ret;
3749
3750 if (dpm_policy->current_level == level)
3751 return 0;
3752
3753 ret = dpm_policy->set_policy(smu, level);
3754
3755 if (!ret)
3756 dpm_policy->current_level = level;
3757
3758 return ret;
3759 }
3760
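/*
 * Per-partition (XCP) metrics query; only supported on ASICs that have a
 * partition manager and a ppt backend implementing get_xcp_metrics.
 */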
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
}

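/*
 * amd_pm_funcs dispatch table for the SW SMU path. The groups below mirror
 * the consumers: sysfs/hwmon attribute handlers, amdgpu core code and the
 * display (DC) stack.
 */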
static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	.pause_power_profile = smu_pause_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
	.get_xcp_metrics = smu_sys_get_xcp_metrics,
};

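/*
 * Wait until the SMU signals @event (qualified by @event_arg), provided the
 * ppt backend implements wait_for_event; otherwise return -EINVAL.
 */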
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

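/*
 * Copy the Smart Trace Buffer (STB) contents into @buf. The caller must
 * supply a buffer of exactly smu->stb_context.stb_buf_size bytes.
 */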
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of the correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock the smu mutex, as we access the STB directly
	 * through MMIO and do not go through the SMU messaging route
	 * (for now at least). For register access we rely on the
	 * implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and release,
 * because .read copies out at most PAGE_SIZE of data per call and is
 * therefore invoked multiple times. We allocate the STB buffer in .open
 * and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

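/*
 * Register the STB dump file in debugfs when STB is supported. The buffer
 * can then be captured from userspace, for example (assuming the device is
 * DRM minor 0):
 *
 *   cat /sys/kernel/debug/dri/0/amdgpu_smu_stb_dump > stb.bin
 */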
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

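/*
 * Report the number of bad HBM pages to the SMU firmware, if the ppt
 * backend implements the message.
 */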
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

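/*
 * Notify the SMU firmware of the reason the device is being flagged for
 * RMA, if the ppt backend implements the message.
 */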
int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}

/**
 * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
 * @smu: smu_context pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 *
 * Return: true if supported, false otherwise.
 */
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
	bool ret = false;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
		ret = smu->ppt_funcs->reset_sdma_is_supported(smu);

	return ret;
}

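/*
 * Request an SDMA engine reset for the instances selected in @inst_mask.
 * Returns 0 (no-op) when the ppt backend has no reset_sdma handler; use
 * smu_reset_sdma_is_supported() to check for support first.
 */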
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
		ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);

	return ret;
}

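/*
 * Request a VCN reset for the instances selected in @inst_mask. Always
 * returns 0; any error from the ASIC-specific dpm_reset_vcn handler is
 * ignored here.
 */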
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
	if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
		smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);

	return 0;
}