1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #define SWSMU_CODE_LAYER_L1
24
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_6_ppt.h"
46 #include "smu_v13_0_7_ppt.h"
47 #include "smu_v14_0_0_ppt.h"
48 #include "smu_v14_0_2_ppt.h"
49 #include "amd_pcie.h"
50
51 /*
52 * DO NOT use these for err/warn/info/debug messages.
53 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54 * They are more MGPU friendly.
55 */
56 #undef pr_err
57 #undef pr_warn
58 #undef pr_info
59 #undef pr_debug
60
61 static const struct amd_pm_funcs swsmu_pm_funcs;
62 static int smu_force_smuclk_levels(struct smu_context *smu,
63 enum smu_clk_type clk_type,
64 uint32_t mask);
65 static int smu_handle_task(struct smu_context *smu,
66 enum amd_dpm_forced_level level,
67 enum amd_pp_task task_id);
68 static int smu_reset(struct smu_context *smu);
69 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70 static int smu_set_fan_control_mode(void *handle, u32 value);
71 static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
72 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75 static void smu_power_profile_mode_get(struct smu_context *smu,
76 enum PP_SMC_POWER_PROFILE profile_mode);
77 static void smu_power_profile_mode_put(struct smu_context *smu,
78 enum PP_SMC_POWER_PROFILE profile_mode);
79 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
80 static int smu_od_edit_dpm_table(void *handle,
81 enum PP_OD_DPM_TABLE_COMMAND type,
82 long *input, uint32_t size);
83
84 static int smu_sys_get_pp_feature_mask(void *handle,
85 char *buf)
86 {
87 struct smu_context *smu = handle;
88
89 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
90 return -EOPNOTSUPP;
91
92 return smu_get_pp_feature_mask(smu, buf);
93 }
94
95 static int smu_sys_set_pp_feature_mask(void *handle,
96 uint64_t new_mask)
97 {
98 struct smu_context *smu = handle;
99
100 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
101 return -EOPNOTSUPP;
102
103 return smu_set_pp_feature_mask(smu, new_mask);
104 }
105
106 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
107 {
108 if (!smu->ppt_funcs->set_gfx_off_residency)
109 return -EINVAL;
110
111 return smu_set_gfx_off_residency(smu, value);
112 }
113
114 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
115 {
116 if (!smu->ppt_funcs->get_gfx_off_residency)
117 return -EINVAL;
118
119 return smu_get_gfx_off_residency(smu, value);
120 }
121
122 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
123 {
124 if (!smu->ppt_funcs->get_gfx_off_entrycount)
125 return -EINVAL;
126
127 return smu_get_gfx_off_entrycount(smu, value);
128 }
129
130 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
131 {
132 if (!smu->ppt_funcs->get_gfx_off_status)
133 return -EINVAL;
134
135 *value = smu_get_gfx_off_status(smu);
136
137 return 0;
138 }
139
140 int smu_set_soft_freq_range(struct smu_context *smu,
141 enum pp_clock_type type,
142 uint32_t min,
143 uint32_t max)
144 {
145 enum smu_clk_type clk_type;
146 int ret = 0;
147
148 clk_type = smu_convert_to_smuclk(type);
149 if (clk_type == SMU_CLK_COUNT)
150 return -EINVAL;
151
152 if (smu->ppt_funcs->set_soft_freq_limited_range)
153 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
154 clk_type,
155 min,
156 max,
157 false);
158
159 return ret;
160 }
161
162 int smu_get_dpm_freq_range(struct smu_context *smu,
163 enum smu_clk_type clk_type,
164 uint32_t *min,
165 uint32_t *max)
166 {
167 int ret = -ENOTSUPP;
168
169 if (!min && !max)
170 return -EINVAL;
171
172 if (smu->ppt_funcs->get_dpm_ultimate_freq)
173 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
174 clk_type,
175 min,
176 max);
177
178 return ret;
179 }
180
181 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
182 {
183 int ret = 0;
184 struct amdgpu_device *adev = smu->adev;
185
186 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
187 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
188 if (ret)
189 dev_err(adev->dev, "Failed to enable gfx imu!\n");
190 }
191 return ret;
192 }
193
194 static u32 smu_get_mclk(void *handle, bool low)
195 {
196 struct smu_context *smu = handle;
197 uint32_t clk_freq;
198 int ret = 0;
199
200 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
201 low ? &clk_freq : NULL,
202 !low ? &clk_freq : NULL);
203 if (ret)
204 return 0;
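/* smu_get_dpm_freq_range() reports MHz; multiply by 100 for the 10 kHz units the legacy powerplay interface is assumed to expect. */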
205 return clk_freq * 100;
206 }
207
208 static u32 smu_get_sclk(void *handle, bool low)
209 {
210 struct smu_context *smu = handle;
211 uint32_t clk_freq;
212 int ret = 0;
213
214 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
215 low ? &clk_freq : NULL,
216 !low ? &clk_freq : NULL);
217 if (ret)
218 return 0;
219 return clk_freq * 100;
220 }
221
222 static int smu_set_gfx_imu_enable(struct smu_context *smu)
223 {
224 struct amdgpu_device *adev = smu->adev;
225
226 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
227 return 0;
228
229 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
230 return 0;
231
232 return smu_set_gfx_power_up_by_imu(smu);
233 }
234
235 static bool is_vcn_enabled(struct amdgpu_device *adev)
236 {
237 int i;
238
239 for (i = 0; i < adev->num_ip_blocks; i++) {
240 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
241 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
242 !adev->ip_blocks[i].status.valid)
243 return false;
244 }
245
246 return true;
247 }
248
249 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
250 bool enable,
251 int inst)
252 {
253 struct smu_power_context *smu_power = &smu->smu_power;
254 struct smu_power_gate *power_gate = &smu_power->power_gate;
255 int ret = 0;
256
257 /*
258 * don't power on VCN/JPEG when they are skipped.
259 */
260 if (!is_vcn_enabled(smu->adev))
261 return 0;
262
263 if (!smu->ppt_funcs->dpm_set_vcn_enable)
264 return 0;
265
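/* Nothing to do if the block is already in the requested power state. */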
266 if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
267 return 0;
268
269 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
270 if (!ret)
271 atomic_set(&power_gate->vcn_gated[inst], !enable);
272
273 return ret;
274 }
275
276 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
277 bool enable)
278 {
279 struct smu_power_context *smu_power = &smu->smu_power;
280 struct smu_power_gate *power_gate = &smu_power->power_gate;
281 int ret = 0;
282
283 if (!is_vcn_enabled(smu->adev))
284 return 0;
285
286 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
287 return 0;
288
289 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
290 return 0;
291
292 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
293 if (!ret)
294 atomic_set(&power_gate->jpeg_gated, !enable);
295
296 return ret;
297 }
298
299 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
300 bool enable)
301 {
302 struct smu_power_context *smu_power = &smu->smu_power;
303 struct smu_power_gate *power_gate = &smu_power->power_gate;
304 int ret = 0;
305
306 if (!smu->ppt_funcs->dpm_set_vpe_enable)
307 return 0;
308
309 if (atomic_read(&power_gate->vpe_gated) ^ enable)
310 return 0;
311
312 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
313 if (!ret)
314 atomic_set(&power_gate->vpe_gated, !enable);
315
316 return ret;
317 }
318
319 static int smu_dpm_set_isp_enable(struct smu_context *smu,
320 bool enable)
321 {
322 struct smu_power_context *smu_power = &smu->smu_power;
323 struct smu_power_gate *power_gate = &smu_power->power_gate;
324 int ret;
325
326 if (!smu->ppt_funcs->dpm_set_isp_enable)
327 return 0;
328
329 if (atomic_read(&power_gate->isp_gated) ^ enable)
330 return 0;
331
332 ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
333 if (!ret)
334 atomic_set(&power_gate->isp_gated, !enable);
335
336 return ret;
337 }
338
339 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
340 bool enable)
341 {
342 struct smu_power_context *smu_power = &smu->smu_power;
343 struct smu_power_gate *power_gate = &smu_power->power_gate;
344 int ret = 0;
345
346 if (!smu->adev->enable_umsch_mm)
347 return 0;
348
349 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
350 return 0;
351
352 if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
353 return 0;
354
355 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
356 if (!ret)
357 atomic_set(&power_gate->umsch_mm_gated, !enable);
358
359 return ret;
360 }
361
362 static int smu_set_mall_enable(struct smu_context *smu)
363 {
364 int ret = 0;
365
366 if (!smu->ppt_funcs->set_mall_enable)
367 return 0;
368
369 ret = smu->ppt_funcs->set_mall_enable(smu);
370
371 return ret;
372 }
373
374 /**
375 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
376 *
377 * @handle: smu_context pointer
378 * @block_type: the IP block to power gate/ungate
379 * @gate: to power gate if true, ungate otherwise
380 * @inst: the instance of the IP block to power gate/ungate
381 *
382 * This API uses no smu->mutex lock protection due to:
383 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce).
384 * This is guarded to be race condition free by the caller.
385 * 2. Or it gets called on a user setting request of power_dpm_force_performance_level.
386 * In this case, the smu->mutex lock protection is already enforced on
387 * the parent API smu_force_performance_level of the call path.
388 */
389 static int smu_dpm_set_power_gate(void *handle,
390 uint32_t block_type,
391 bool gate,
392 int inst)
393 {
394 struct smu_context *smu = handle;
395 int ret = 0;
396
397 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
398 dev_WARN(smu->adev->dev,
399 "SMU uninitialized but power %s requested for %u!\n",
400 gate ? "gate" : "ungate", block_type);
401 return -EOPNOTSUPP;
402 }
403
404 switch (block_type) {
405 /*
406 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
407 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
408 */
409 case AMD_IP_BLOCK_TYPE_UVD:
410 case AMD_IP_BLOCK_TYPE_VCN:
411 ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
412 if (ret)
413 dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
414 gate ? "gate" : "ungate", inst);
415 break;
416 case AMD_IP_BLOCK_TYPE_GFX:
417 ret = smu_gfx_off_control(smu, gate);
418 if (ret)
419 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
420 gate ? "enable" : "disable");
421 break;
422 case AMD_IP_BLOCK_TYPE_SDMA:
423 ret = smu_powergate_sdma(smu, gate);
424 if (ret)
425 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
426 gate ? "gate" : "ungate");
427 break;
428 case AMD_IP_BLOCK_TYPE_JPEG:
429 ret = smu_dpm_set_jpeg_enable(smu, !gate);
430 if (ret)
431 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
432 gate ? "gate" : "ungate");
433 break;
434 case AMD_IP_BLOCK_TYPE_VPE:
435 ret = smu_dpm_set_vpe_enable(smu, !gate);
436 if (ret)
437 dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
438 gate ? "gate" : "ungate");
439 break;
440 case AMD_IP_BLOCK_TYPE_ISP:
441 ret = smu_dpm_set_isp_enable(smu, !gate);
442 if (ret)
443 dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
444 gate ? "gate" : "ungate");
445 break;
446 default:
447 dev_err(smu->adev->dev, "Unsupported block type!\n");
448 return -EINVAL;
449 }
450
451 return ret;
452 }
453
454 /**
455 * smu_set_user_clk_dependencies - set user profile clock dependencies
456 *
457 * @smu: smu_context pointer
458 * @clk: enum smu_clk_type type
459 *
460 * Enable/Disable the clock dependency for the @clk type.
461 */
462 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
463 {
464 if (smu->adev->in_suspend)
465 return;
466
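/* Forcing one of MCLK/FCLK/SOCCLK implicitly drives the other two; record them as dependencies so the profile restore skips them. */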
467 if (clk == SMU_MCLK) {
468 smu->user_dpm_profile.clk_dependency = 0;
469 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
470 } else if (clk == SMU_FCLK) {
471 /* MCLK takes precedence over FCLK */
472 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
473 return;
474
475 smu->user_dpm_profile.clk_dependency = 0;
476 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
477 } else if (clk == SMU_SOCCLK) {
478 /* MCLK takes precedence over SOCCLK */
479 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
480 return;
481
482 smu->user_dpm_profile.clk_dependency = 0;
483 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
484 } else
485 /* Add clk dependencies here, if any */
486 return;
487 }
488
489 /**
490 * smu_restore_dpm_user_profile - reinstate user dpm profile
491 *
492 * @smu: smu_context pointer
493 *
494 * Restore the saved user power configurations, including power limit,
495 * clock frequencies, fan control mode and fan speed.
496 */
497 static void smu_restore_dpm_user_profile(struct smu_context *smu)
498 {
499 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
500 int ret = 0;
501
502 if (!smu->adev->in_suspend)
503 return;
504
505 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
506 return;
507
508 /* Enable restore flag */
509 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
510
511 /* set the user dpm power limits */
512 for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
513 if (!smu->user_dpm_profile.power_limits[i])
514 continue;
515 ret = smu_set_power_limit(smu, i,
516 smu->user_dpm_profile.power_limits[i]);
517 if (ret)
518 dev_err(smu->adev->dev, "Failed to set %d power limit value\n", i);
519 }
520
521 /* set the user dpm clock configurations */
522 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
523 enum smu_clk_type clk_type;
524
525 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
526 /*
527 * Iterate over smu clk type and force the saved user clk
528 * configs, skip if clock dependency is enabled
529 */
530 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
531 smu->user_dpm_profile.clk_mask[clk_type]) {
532 ret = smu_force_smuclk_levels(smu, clk_type,
533 smu->user_dpm_profile.clk_mask[clk_type]);
534 if (ret)
535 dev_err(smu->adev->dev,
536 "Failed to set clock type = %d\n", clk_type);
537 }
538 }
539 }
540
541 /* set the user dpm fan configurations */
542 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
543 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
544 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
545 if (ret != -EOPNOTSUPP) {
546 smu->user_dpm_profile.fan_speed_pwm = 0;
547 smu->user_dpm_profile.fan_speed_rpm = 0;
548 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
549 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
550 }
551
552 if (smu->user_dpm_profile.fan_speed_pwm) {
553 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
554 if (ret != -EOPNOTSUPP)
555 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
556 }
557
558 if (smu->user_dpm_profile.fan_speed_rpm) {
559 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
560 if (ret != -EOPNOTSUPP)
561 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
562 }
563 }
564
565 /* Restore user customized OD settings */
566 if (smu->user_dpm_profile.user_od) {
567 if (smu->ppt_funcs->restore_user_od_settings) {
568 ret = smu->ppt_funcs->restore_user_od_settings(smu);
569 if (ret)
570 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
571 }
572 }
573
574 /* Disable restore flag */
575 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
576 }
577
578 static int smu_get_power_num_states(void *handle,
579 struct pp_states_info *state_info)
580 {
581 if (!state_info)
582 return -EINVAL;
583
584 /* power states are not supported */
585 memset(state_info, 0, sizeof(struct pp_states_info));
586 state_info->nums = 1;
587 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
588
589 return 0;
590 }
591
592 bool is_support_sw_smu(struct amdgpu_device *adev)
593 {
594 /* vega20 is 11.0.2, but it's supported via the powerplay code */
595 if (adev->asic_type == CHIP_VEGA20)
596 return false;
597
598 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
599 amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
600 return true;
601
602 return false;
603 }
604
605 bool is_support_cclk_dpm(struct amdgpu_device *adev)
606 {
607 struct smu_context *smu = adev->powerplay.pp_handle;
608
609 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
610 return false;
611
612 return true;
613 }
614
615 int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
616 uint32_t param, uint32_t *read_arg)
617 {
618 struct smu_context *smu = adev->powerplay.pp_handle;
619 int ret = -EOPNOTSUPP;
620
621 if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
622 ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);
623
624 return ret;
625 }
626
627 static int smu_sys_get_pp_table(void *handle,
628 char **table)
629 {
630 struct smu_context *smu = handle;
631 struct smu_table_context *smu_table = &smu->smu_table;
632
633 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
634 return -EOPNOTSUPP;
635
636 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
637 return -EOPNOTSUPP;
638
639 if (smu_table->hardcode_pptable)
640 *table = smu_table->hardcode_pptable;
641 else
642 *table = smu_table->power_play_table;
643
644 return smu_table->power_play_table_size;
645 }
646
647 static int smu_sys_set_pp_table(void *handle,
648 const char *buf,
649 size_t size)
650 {
651 struct smu_context *smu = handle;
652 struct smu_table_context *smu_table = &smu->smu_table;
653 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
654 int ret = 0;
655
656 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
657 return -EOPNOTSUPP;
658
659 if (header->usStructureSize != size) {
660 dev_err(smu->adev->dev, "pp table size not matched !\n");
661 return -EIO;
662 }
663
664 if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
665 kfree(smu_table->hardcode_pptable);
666 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
667 if (!smu_table->hardcode_pptable)
668 return -ENOMEM;
669 }
670
671 memcpy(smu_table->hardcode_pptable, buf, size);
672 smu_table->power_play_table = smu_table->hardcode_pptable;
673 smu_table->power_play_table_size = size;
674
675 /*
676 * A special hw_fini action (for Navi1x, the DPM disablement will be
677 * skipped) may be needed for custom pptable uploading.
678 */
679 smu->uploading_custom_pp_table = true;
680
681 ret = smu_reset(smu);
682 if (ret)
683 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
684
685 smu->uploading_custom_pp_table = false;
686
687 return ret;
688 }
689
690 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
691 {
692 struct smu_feature *feature = &smu->smu_feature;
693 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
694 int ret = 0;
695
696 /*
697 * With SCPM enabled, setting the allowed feature masks (via
698 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
699 * That means there is no way to let PMFW know the settings below.
700 * Thus, we just assume all the features are allowed under
701 * such a scenario.
702 */
703 if (smu->adev->scpm_enabled) {
704 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
705 return 0;
706 }
707
708 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
709
710 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
711 SMU_FEATURE_MAX/32);
712 if (ret)
713 return ret;
714
715 bitmap_or(feature->allowed, feature->allowed,
716 (unsigned long *)allowed_feature_mask,
717 feature->feature_num);
718
719 return ret;
720 }
721
722 static int smu_set_funcs(struct amdgpu_device *adev)
723 {
724 struct smu_context *smu = adev->powerplay.pp_handle;
725
726 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
727 smu->od_enabled = true;
728
729 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
730 case IP_VERSION(11, 0, 0):
731 case IP_VERSION(11, 0, 5):
732 case IP_VERSION(11, 0, 9):
733 navi10_set_ppt_funcs(smu);
734 break;
735 case IP_VERSION(11, 0, 7):
736 case IP_VERSION(11, 0, 11):
737 case IP_VERSION(11, 0, 12):
738 case IP_VERSION(11, 0, 13):
739 sienna_cichlid_set_ppt_funcs(smu);
740 break;
741 case IP_VERSION(12, 0, 0):
742 case IP_VERSION(12, 0, 1):
743 renoir_set_ppt_funcs(smu);
744 break;
745 case IP_VERSION(11, 5, 0):
746 case IP_VERSION(11, 5, 2):
747 vangogh_set_ppt_funcs(smu);
748 break;
749 case IP_VERSION(13, 0, 1):
750 case IP_VERSION(13, 0, 3):
751 case IP_VERSION(13, 0, 8):
752 yellow_carp_set_ppt_funcs(smu);
753 break;
754 case IP_VERSION(13, 0, 4):
755 case IP_VERSION(13, 0, 11):
756 smu_v13_0_4_set_ppt_funcs(smu);
757 break;
758 case IP_VERSION(13, 0, 5):
759 smu_v13_0_5_set_ppt_funcs(smu);
760 break;
761 case IP_VERSION(11, 0, 8):
762 cyan_skillfish_set_ppt_funcs(smu);
763 break;
764 case IP_VERSION(11, 0, 2):
765 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
766 arcturus_set_ppt_funcs(smu);
767 /* OD is not supported on Arcturus */
768 smu->od_enabled = false;
769 break;
770 case IP_VERSION(13, 0, 2):
771 aldebaran_set_ppt_funcs(smu);
772 /* Enable pp_od_clk_voltage node */
773 smu->od_enabled = true;
774 break;
775 case IP_VERSION(13, 0, 0):
776 case IP_VERSION(13, 0, 10):
777 smu_v13_0_0_set_ppt_funcs(smu);
778 break;
779 case IP_VERSION(13, 0, 6):
780 case IP_VERSION(13, 0, 14):
781 case IP_VERSION(13, 0, 12):
782 smu_v13_0_6_set_ppt_funcs(smu);
783 /* Enable pp_od_clk_voltage node */
784 smu->od_enabled = true;
785 break;
786 case IP_VERSION(13, 0, 7):
787 smu_v13_0_7_set_ppt_funcs(smu);
788 break;
789 case IP_VERSION(14, 0, 0):
790 case IP_VERSION(14, 0, 1):
791 case IP_VERSION(14, 0, 4):
792 case IP_VERSION(14, 0, 5):
793 smu_v14_0_0_set_ppt_funcs(smu);
794 break;
795 case IP_VERSION(14, 0, 2):
796 case IP_VERSION(14, 0, 3):
797 smu_v14_0_2_set_ppt_funcs(smu);
798 break;
799 default:
800 return -EINVAL;
801 }
802
803 return 0;
804 }
805
806 static int smu_early_init(struct amdgpu_ip_block *ip_block)
807 {
808 struct amdgpu_device *adev = ip_block->adev;
809 struct smu_context *smu;
810 int r;
811
812 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
813 if (!smu)
814 return -ENOMEM;
815
816 smu->adev = adev;
817 smu->pm_enabled = !!amdgpu_dpm;
818 smu->is_apu = false;
819 smu->smu_baco.state = SMU_BACO_STATE_NONE;
820 smu->smu_baco.platform_support = false;
821 smu->smu_baco.maco_support = false;
822 smu->user_dpm_profile.fan_mode = -1;
823 smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
824
825 mutex_init(&smu->message_lock);
826
827 adev->powerplay.pp_handle = smu;
828 adev->powerplay.pp_funcs = &swsmu_pm_funcs;
829
830 r = smu_set_funcs(adev);
831 if (r)
832 return r;
833 return smu_init_microcode(smu);
834 }
835
836 static int smu_set_default_dpm_table(struct smu_context *smu)
837 {
838 struct amdgpu_device *adev = smu->adev;
839 struct smu_power_context *smu_power = &smu->smu_power;
840 struct smu_power_gate *power_gate = &smu_power->power_gate;
841 int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
842 int ret = 0;
843
844 if (!smu->ppt_funcs->set_default_dpm_table)
845 return 0;
846
847 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
848 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
849 vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
850 }
851 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
852 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
853
854 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
855 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
856 ret = smu_dpm_set_vcn_enable(smu, true, i);
857 if (ret)
858 return ret;
859 }
860 }
861
862 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
863 ret = smu_dpm_set_jpeg_enable(smu, true);
864 if (ret)
865 goto err_out;
866 }
867
868 ret = smu->ppt_funcs->set_default_dpm_table(smu);
869 if (ret)
870 dev_err(smu->adev->dev,
871 "Failed to setup default dpm clock tables!\n");
872
873 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
874 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
875 err_out:
876 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
877 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
878 smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
879 }
880
881 return ret;
882 }
883
884 static int smu_apply_default_config_table_settings(struct smu_context *smu)
885 {
886 struct amdgpu_device *adev = smu->adev;
887 int ret = 0;
888
889 ret = smu_get_default_config_table_settings(smu,
890 &adev->pm.config_table);
891 if (ret)
892 return ret;
893
894 return smu_set_config_table(smu, &adev->pm.config_table);
895 }
896
897 static int smu_late_init(struct amdgpu_ip_block *ip_block)
898 {
899 struct amdgpu_device *adev = ip_block->adev;
900 struct smu_context *smu = adev->powerplay.pp_handle;
901 int ret = 0;
902
903 smu_set_fine_grain_gfx_freq_parameters(smu);
904
905 if (!smu->pm_enabled)
906 return 0;
907
908 ret = smu_post_init(smu);
909 if (ret) {
910 dev_err(adev->dev, "Failed to post smu init!\n");
911 return ret;
912 }
913
914 /*
915 * Explicitly notify PMFW of the power mode the system is in, since
916 * PMFW may have booted the ASIC with a different mode.
917 * For those supporting AC/DC switching via gpio, PMFW will
918 * handle the switch automatically. Driver involvement
919 * is unnecessary.
920 */
921 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
922 smu_set_ac_dc(smu);
923
924 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
925 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
926 return 0;
927
928 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
929 ret = smu_set_default_od_settings(smu);
930 if (ret) {
931 dev_err(adev->dev, "Failed to setup default OD settings!\n");
932 return ret;
933 }
934 }
935
936 ret = smu_populate_umd_state_clk(smu);
937 if (ret) {
938 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
939 return ret;
940 }
941
942 ret = smu_get_asic_power_limits(smu,
943 &smu->current_power_limit,
944 &smu->default_power_limit,
945 &smu->max_power_limit,
946 &smu->min_power_limit);
947 if (ret) {
948 dev_err(adev->dev, "Failed to get asic power limits!\n");
949 return ret;
950 }
951
952 if (!amdgpu_sriov_vf(adev))
953 smu_get_unique_id(smu);
954
955 smu_get_fan_parameters(smu);
956
957 smu_handle_task(smu,
958 smu->smu_dpm.dpm_level,
959 AMD_PP_TASK_COMPLETE_INIT);
960
961 ret = smu_apply_default_config_table_settings(smu);
962 if (ret && (ret != -EOPNOTSUPP)) {
963 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
964 return ret;
965 }
966
967 smu_restore_dpm_user_profile(smu);
968
969 return 0;
970 }
971
972 static int smu_init_fb_allocations(struct smu_context *smu)
973 {
974 struct amdgpu_device *adev = smu->adev;
975 struct smu_table_context *smu_table = &smu->smu_table;
976 struct smu_table *tables = smu_table->tables;
977 struct smu_table *driver_table = &(smu_table->driver_table);
978 uint32_t max_table_size = 0;
979 int ret, i;
980
981 /* VRAM allocation for tool table */
982 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
983 ret = amdgpu_bo_create_kernel(adev,
984 tables[SMU_TABLE_PMSTATUSLOG].size,
985 tables[SMU_TABLE_PMSTATUSLOG].align,
986 tables[SMU_TABLE_PMSTATUSLOG].domain,
987 &tables[SMU_TABLE_PMSTATUSLOG].bo,
988 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
989 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
990 if (ret) {
991 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
992 return ret;
993 }
994 }
995
996 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
997 /* VRAM allocation for driver table */
998 for (i = 0; i < SMU_TABLE_COUNT; i++) {
999 if (tables[i].size == 0)
1000 continue;
1001
1002 /* If one of the tables has VRAM domain restriction, keep it in
1003 * VRAM
1004 */
1005 if ((tables[i].domain &
1006 (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
1007 AMDGPU_GEM_DOMAIN_VRAM)
1008 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
1009
1010 if (i == SMU_TABLE_PMSTATUSLOG)
1011 continue;
1012
1013 if (max_table_size < tables[i].size)
1014 max_table_size = tables[i].size;
1015 }
1016
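/* A single driver table BO, sized for the largest SMU table, is shared for all driver<->SMU table transfers. */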
1017 driver_table->size = max_table_size;
1018 driver_table->align = PAGE_SIZE;
1019
1020 ret = amdgpu_bo_create_kernel(adev,
1021 driver_table->size,
1022 driver_table->align,
1023 driver_table->domain,
1024 &driver_table->bo,
1025 &driver_table->mc_address,
1026 &driver_table->cpu_addr);
1027 if (ret) {
1028 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
1029 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1030 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1031 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1032 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1033 }
1034
1035 return ret;
1036 }
1037
1038 static int smu_fini_fb_allocations(struct smu_context *smu)
1039 {
1040 struct smu_table_context *smu_table = &smu->smu_table;
1041 struct smu_table *tables = smu_table->tables;
1042 struct smu_table *driver_table = &(smu_table->driver_table);
1043
1044 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1045 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1046 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1047 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1048
1049 amdgpu_bo_free_kernel(&driver_table->bo,
1050 &driver_table->mc_address,
1051 &driver_table->cpu_addr);
1052
1053 return 0;
1054 }
1055
1056 static void smu_update_gpu_addresses(struct smu_context *smu)
1057 {
1058 struct smu_table_context *smu_table = &smu->smu_table;
1059 struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
1060 struct smu_table *driver_table = &(smu_table->driver_table);
1061 struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
1062
1063 if (pm_status_table->bo)
1064 pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
1065 if (driver_table->bo)
1066 driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
1067 if (dummy_read_1_table->bo)
1068 dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
1069 }
1070
1071 /**
1072 * smu_alloc_memory_pool - allocate memory pool in the system memory
1073 *
1074 * @smu: smu_context pointer
1075 *
1076 * This memory pool is for SMC use; the SetSystemVirtualDramAddr and
1077 * DramLogSetDramAddr messages notify the SMC of its location.
1078 *
1079 * Returns 0 on success, error on failure.
1080 */
1081 static int smu_alloc_memory_pool(struct smu_context *smu)
1082 {
1083 struct amdgpu_device *adev = smu->adev;
1084 struct smu_table_context *smu_table = &smu->smu_table;
1085 struct smu_table *memory_pool = &smu_table->memory_pool;
1086 uint64_t pool_size = smu->pool_size;
1087 int ret = 0;
1088
1089 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1090 return ret;
1091
1092 memory_pool->size = pool_size;
1093 memory_pool->align = PAGE_SIZE;
1094 memory_pool->domain =
1095 (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
1096 AMDGPU_GEM_DOMAIN_VRAM :
1097 AMDGPU_GEM_DOMAIN_GTT;
1098
1099 switch (pool_size) {
1100 case SMU_MEMORY_POOL_SIZE_256_MB:
1101 case SMU_MEMORY_POOL_SIZE_512_MB:
1102 case SMU_MEMORY_POOL_SIZE_1_GB:
1103 case SMU_MEMORY_POOL_SIZE_2_GB:
1104 ret = amdgpu_bo_create_kernel(adev,
1105 memory_pool->size,
1106 memory_pool->align,
1107 memory_pool->domain,
1108 &memory_pool->bo,
1109 &memory_pool->mc_address,
1110 &memory_pool->cpu_addr);
1111 if (ret)
1112 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
1113 break;
1114 default:
1115 break;
1116 }
1117
1118 return ret;
1119 }
1120
1121 static int smu_free_memory_pool(struct smu_context *smu)
1122 {
1123 struct smu_table_context *smu_table = &smu->smu_table;
1124 struct smu_table *memory_pool = &smu_table->memory_pool;
1125
1126 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1127 return 0;
1128
1129 amdgpu_bo_free_kernel(&memory_pool->bo,
1130 &memory_pool->mc_address,
1131 &memory_pool->cpu_addr);
1132
1133 memset(memory_pool, 0, sizeof(struct smu_table));
1134
1135 return 0;
1136 }
1137
1138 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1139 {
1140 struct smu_table_context *smu_table = &smu->smu_table;
1141 struct smu_table *dummy_read_1_table =
1142 &smu_table->dummy_read_1_table;
1143 struct amdgpu_device *adev = smu->adev;
1144 int ret = 0;
1145
1146 if (!dummy_read_1_table->size)
1147 return 0;
1148
1149 ret = amdgpu_bo_create_kernel(adev,
1150 dummy_read_1_table->size,
1151 dummy_read_1_table->align,
1152 dummy_read_1_table->domain,
1153 &dummy_read_1_table->bo,
1154 &dummy_read_1_table->mc_address,
1155 &dummy_read_1_table->cpu_addr);
1156 if (ret)
1157 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1158
1159 return ret;
1160 }
1161
1162 static void smu_free_dummy_read_table(struct smu_context *smu)
1163 {
1164 struct smu_table_context *smu_table = &smu->smu_table;
1165 struct smu_table *dummy_read_1_table =
1166 &smu_table->dummy_read_1_table;
1167
1168
1169 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1170 &dummy_read_1_table->mc_address,
1171 &dummy_read_1_table->cpu_addr);
1172
1173 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1174 }
1175
1176 static int smu_smc_table_sw_init(struct smu_context *smu)
1177 {
1178 int ret;
1179
1180 /**
1181 * Create smu_table structure, and init smc tables such as
1182 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1183 */
1184 ret = smu_init_smc_tables(smu);
1185 if (ret) {
1186 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1187 return ret;
1188 }
1189
1190 /**
1191 * Create the smu_power_context structure and allocate the smu_dpm_context
1192 * data needed to fill it.
1193 */
1194 ret = smu_init_power(smu);
1195 if (ret) {
1196 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
1197 return ret;
1198 }
1199
1200 /*
1201 * allocate vram bos to store smc table contents.
1202 */
1203 ret = smu_init_fb_allocations(smu);
1204 if (ret)
1205 return ret;
1206
1207 ret = smu_alloc_memory_pool(smu);
1208 if (ret)
1209 return ret;
1210
1211 ret = smu_alloc_dummy_read_table(smu);
1212 if (ret)
1213 return ret;
1214
1215 ret = smu_i2c_init(smu);
1216 if (ret)
1217 return ret;
1218
1219 return 0;
1220 }
1221
1222 static int smu_smc_table_sw_fini(struct smu_context *smu)
1223 {
1224 int ret;
1225
1226 smu_i2c_fini(smu);
1227
1228 smu_free_dummy_read_table(smu);
1229
1230 ret = smu_free_memory_pool(smu);
1231 if (ret)
1232 return ret;
1233
1234 ret = smu_fini_fb_allocations(smu);
1235 if (ret)
1236 return ret;
1237
1238 ret = smu_fini_power(smu);
1239 if (ret) {
1240 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
1241 return ret;
1242 }
1243
1244 ret = smu_fini_smc_tables(smu);
1245 if (ret) {
1246 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
1247 return ret;
1248 }
1249
1250 return 0;
1251 }
1252
1253 static void smu_throttling_logging_work_fn(struct work_struct *work)
1254 {
1255 struct smu_context *smu = container_of(work, struct smu_context,
1256 throttling_logging_work);
1257
1258 smu_log_thermal_throttling(smu);
1259 }
1260
1261 static void smu_interrupt_work_fn(struct work_struct *work)
1262 {
1263 struct smu_context *smu = container_of(work, struct smu_context,
1264 interrupt_work);
1265
1266 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1267 smu->ppt_funcs->interrupt_work(smu);
1268 }
1269
1270 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1271 {
1272 struct smu_context *smu =
1273 container_of(work, struct smu_context, swctf_delayed_work.work);
1274 struct smu_temperature_range *range =
1275 &smu->thermal_range;
1276 struct amdgpu_device *adev = smu->adev;
1277 uint32_t hotspot_tmp, size;
1278
1279 /*
1280 * If the hotspot temperature is confirmed as below SW CTF setting point
1281 * after the delay enforced, nothing will be done.
1282 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1283 */
1284 if (range->software_shutdown_temp &&
1285 smu->ppt_funcs->read_sensor &&
1286 !smu->ppt_funcs->read_sensor(smu,
1287 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1288 &hotspot_tmp,
1289 &size) &&
1290 hotspot_tmp / 1000 < range->software_shutdown_temp)
1291 return;
1292
1293 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1294 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1295 orderly_poweroff(true);
1296 }
1297
1298 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1299 {
1300 struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1301 struct smu_dpm_policy_ctxt *policy_ctxt;
1302 struct smu_dpm_policy *policy;
1303
1304 policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1305 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1306 if (policy)
1307 policy->current_level = XGMI_PLPD_DEFAULT;
1308 return;
1309 }
1310
1311 /* PMFW put PLPD into default policy after enabling the feature */
1312 if (smu_feature_is_enabled(smu,
1313 SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1314 if (policy)
1315 policy->current_level = XGMI_PLPD_DEFAULT;
1316 } else {
1317 policy_ctxt = dpm_ctxt->dpm_policies;
1318 if (policy_ctxt)
1319 policy_ctxt->policy_mask &=
1320 ~BIT(PP_PM_POLICY_XGMI_PLPD);
1321 }
1322 }
1323
1324 static void smu_init_power_profile(struct smu_context *smu)
1325 {
1326 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
1327 smu->power_profile_mode =
1328 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1329 smu_power_profile_mode_get(smu, smu->power_profile_mode);
1330 }
1331
1332 void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
1333 {
1334 struct smu_feature_cap *fea_cap = &smu->fea_cap;
1335
1336 if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
1337 return;
1338
1339 set_bit(fea_id, fea_cap->cap_map);
1340 }
1341
1342 bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
1343 {
1344 struct smu_feature_cap *fea_cap = &smu->fea_cap;
1345
1346 if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
1347 return false;
1348
1349 return test_bit(fea_id, fea_cap->cap_map);
1350 }
1351
1352 static void smu_feature_cap_init(struct smu_context *smu)
1353 {
1354 struct smu_feature_cap *fea_cap = &smu->fea_cap;
1355
1356 bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
1357 }
1358
1359 static int smu_sw_init(struct amdgpu_ip_block *ip_block)
1360 {
1361 struct amdgpu_device *adev = ip_block->adev;
1362 struct smu_context *smu = adev->powerplay.pp_handle;
1363 int i, ret;
1364
1365 smu->pool_size = adev->pm.smu_prv_buffer_size;
1366 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1367 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1368 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1369
1370 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1371 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1372 atomic64_set(&smu->throttle_int_counter, 0);
1373 smu->watermarks_bitmap = 0;
1374
1375 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1376 atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
1377 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1378 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1379 atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
1380 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1381
1382 smu_init_power_profile(smu);
1383 smu->display_config = &adev->pm.pm_display_cfg;
1384
1385 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1386 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1387
1388 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1389 smu_swctf_delayed_work_handler);
1390
1391 smu_feature_cap_init(smu);
1392
1393 ret = smu_smc_table_sw_init(smu);
1394 if (ret) {
1395 dev_err(adev->dev, "Failed to sw init smc table!\n");
1396 return ret;
1397 }
1398
1399 /* get boot_values from vbios to set revision, gfxclk, etc. */
1400 ret = smu_get_vbios_bootup_values(smu);
1401 if (ret) {
1402 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1403 return ret;
1404 }
1405
1406 ret = smu_init_pptable_microcode(smu);
1407 if (ret) {
1408 dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1409 return ret;
1410 }
1411
1412 ret = smu_register_irq_handler(smu);
1413 if (ret) {
1414 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1415 return ret;
1416 }
1417
1418 /* If there is no way to query fan control mode, fan control is not supported */
1419 if (!smu->ppt_funcs->get_fan_control_mode)
1420 smu->adev->pm.no_fan = true;
1421
1422 return 0;
1423 }
1424
1425 static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
1426 {
1427 struct amdgpu_device *adev = ip_block->adev;
1428 struct smu_context *smu = adev->powerplay.pp_handle;
1429 int ret;
1430
1431 ret = smu_smc_table_sw_fini(smu);
1432 if (ret) {
1433 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1434 return ret;
1435 }
1436
1437 if (smu->custom_profile_params) {
1438 kfree(smu->custom_profile_params);
1439 smu->custom_profile_params = NULL;
1440 }
1441
1442 smu_fini_microcode(smu);
1443
1444 return 0;
1445 }
1446
1447 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1448 {
1449 struct amdgpu_device *adev = smu->adev;
1450 struct smu_temperature_range *range =
1451 &smu->thermal_range;
1452 int ret = 0;
1453
1454 if (!smu->ppt_funcs->get_thermal_temperature_range)
1455 return 0;
1456
1457 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1458 if (ret)
1459 return ret;
1460
1461 adev->pm.dpm.thermal.min_temp = range->min;
1462 adev->pm.dpm.thermal.max_temp = range->max;
1463 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1464 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1465 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1466 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1467 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1468 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1469 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1470
1471 return ret;
1472 }
1473
1474 /**
1475 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1476 *
1477 * @smu: smu_context pointer
1478 *
1479 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1480 * Returns 0 on success, error on failure.
1481 */
1482 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1483 {
1484 struct wbrf_ranges_in_out wbrf_exclusion = {0};
1485 struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1486 struct amdgpu_device *adev = smu->adev;
1487 uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1488 uint64_t start, end;
1489 int ret, i, j;
1490
1491 ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1492 if (ret) {
1493 dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1494 return ret;
1495 }
1496
1497 /*
1498 * The exclusion ranges array we got might be filled with holes and duplicate
1499 * entries. For example:
1500 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1501 * We need to do some sorting to eliminate those holes and duplicate entries.
1502 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1503 */
1504 for (i = 0; i < num_of_wbrf_ranges; i++) {
1505 start = wifi_bands[i].start;
1506 end = wifi_bands[i].end;
1507
1508 /* get the last valid entry to fill the intermediate hole */
1509 if (!start && !end) {
1510 for (j = num_of_wbrf_ranges - 1; j > i; j--)
1511 if (wifi_bands[j].start && wifi_bands[j].end)
1512 break;
1513
1514 /* no valid entry left */
1515 if (j <= i)
1516 break;
1517
1518 start = wifi_bands[i].start = wifi_bands[j].start;
1519 end = wifi_bands[i].end = wifi_bands[j].end;
1520 wifi_bands[j].start = 0;
1521 wifi_bands[j].end = 0;
1522 num_of_wbrf_ranges = j;
1523 }
1524
1525 /* eliminate duplicate entries */
1526 for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1527 if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1528 wifi_bands[j].start = 0;
1529 wifi_bands[j].end = 0;
1530 }
1531 }
1532 }
1533
1534 /* Send the sorted wifi_bands to PMFW */
1535 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1536 /* Try to set the wifi_bands again */
1537 if (unlikely(ret == -EBUSY)) {
1538 mdelay(5);
1539 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1540 }
1541
1542 return ret;
1543 }
1544
1545 /**
1546 * smu_wbrf_event_handler - handle notify events
1547 *
1548 * @nb: notifier block
1549 * @action: event type
1550 * @_arg: event data
1551 *
1552 * Calls relevant amdgpu function in response to wbrf event
1553 * notification from kernel.
1554 */
1555 static int smu_wbrf_event_handler(struct notifier_block *nb,
1556 unsigned long action, void *_arg)
1557 {
1558 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1559
1560 switch (action) {
1561 case WBRF_CHANGED:
1562 schedule_delayed_work(&smu->wbrf_delayed_work,
1563 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1564 break;
1565 default:
1566 return NOTIFY_DONE;
1567 }
1568
1569 return NOTIFY_OK;
1570 }
1571
1572 /**
1573 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1574 *
1575 * @work: struct work_struct pointer
1576 *
1577 * Flood is over and driver will consume the latest exclusion ranges.
1578 */
1579 static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1580 {
1581 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1582
1583 smu_wbrf_handle_exclusion_ranges(smu);
1584 }
1585
1586 /**
1587 * smu_wbrf_support_check - check wbrf support
1588 *
1589 * @smu: smu_context pointer
1590 *
1591 * Checks via the ACPI interface whether wbrf is supported.
1592 */
1593 static void smu_wbrf_support_check(struct smu_context *smu)
1594 {
1595 struct amdgpu_device *adev = smu->adev;
1596
1597 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1598 acpi_amd_wbrf_supported_consumer(adev->dev);
1599
1600 if (smu->wbrf_supported)
1601 dev_info(adev->dev, "RF interference mitigation is supported\n");
1602 }
1603
1604 /**
1605 * smu_wbrf_init - init driver wbrf support
1606 *
1607 * @smu: smu_context pointer
1608 *
1609 * Verifies the AMD ACPI interfaces and registers with the wbrf
1610 * notifier chain if wbrf feature is supported.
1611 * Returns 0 on success, error on failure.
1612 */
1613 static int smu_wbrf_init(struct smu_context *smu)
1614 {
1615 int ret;
1616
1617 if (!smu->wbrf_supported)
1618 return 0;
1619
1620 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1621
1622 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1623 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1624 if (ret)
1625 return ret;
1626
1627 /*
1628 * Some wifi band exclusion ranges may already be in place
1629 * before our driver is loaded. Schedule work to make sure
1630 * our driver is aware of those exclusion ranges.
1631 */
1632 schedule_delayed_work(&smu->wbrf_delayed_work,
1633 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1634
1635 return 0;
1636 }
1637
1638 /**
1639 * smu_wbrf_fini - tear down driver wbrf support
1640 *
1641 * @smu: smu_context pointer
1642 *
1643 * Unregisters with the wbrf notifier chain.
1644 */
1645 static void smu_wbrf_fini(struct smu_context *smu)
1646 {
1647 if (!smu->wbrf_supported)
1648 return;
1649
1650 amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1651
1652 cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1653 }
1654
1655 static int smu_smc_hw_setup(struct smu_context *smu)
1656 {
1657 struct smu_feature *feature = &smu->smu_feature;
1658 struct amdgpu_device *adev = smu->adev;
1659 uint8_t pcie_gen = 0, pcie_width = 0;
1660 uint64_t features_supported;
1661 int ret = 0;
1662
1663 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1664 case IP_VERSION(11, 0, 7):
1665 case IP_VERSION(11, 0, 11):
1666 case IP_VERSION(11, 5, 0):
1667 case IP_VERSION(11, 5, 2):
1668 case IP_VERSION(11, 0, 12):
1669 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1670 dev_info(adev->dev, "dpm has been enabled\n");
1671 ret = smu_system_features_control(smu, true);
1672 if (ret) {
1673 dev_err(adev->dev, "Failed system features control!\n");
1674 return ret;
1675 }
1676
1677 return smu_enable_thermal_alert(smu);
1678 }
1679 break;
1680 default:
1681 break;
1682 }
1683
1684 ret = smu_init_display_count(smu, 0);
1685 if (ret) {
1686 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1687 return ret;
1688 }
1689
1690 ret = smu_set_driver_table_location(smu);
1691 if (ret) {
1692 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1693 return ret;
1694 }
1695
1696 /*
1697 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1698 */
1699 ret = smu_set_tool_table_location(smu);
1700 if (ret) {
1701 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1702 return ret;
1703 }
1704
1705 /*
1706 * The msgs SetSystemVirtualDramAddr and DramLogSetDramAddr notify the
1707 * SMC of the memory pool location.
1708 */
1709 ret = smu_notify_memory_pool_location(smu);
1710 if (ret) {
1711 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1712 return ret;
1713 }
1714
1715 /*
1716 * It is assumed the pptable used before runpm is the same as
1717 * the one used afterwards. Thus, we can reuse the stored
1718 * copy and do not need to set up the pptable again.
1719 */
1720 if (!adev->in_runpm) {
1721 ret = smu_setup_pptable(smu);
1722 if (ret) {
1723 dev_err(adev->dev, "Failed to setup pptable!\n");
1724 return ret;
1725 }
1726 }
1727
1728 /* smu_dump_pptable(smu); */
1729
1730 /*
1731 * With SCPM enabled, PSP is responsible for transferring the PPTable
1732 * (to SMU). Driver involvement is neither needed nor permitted.
1733 */
1734 if (!adev->scpm_enabled) {
1735 /*
1736 * Copy pptable bo in the vram to smc with SMU MSGs such as
1737 * SetDriverDramAddr and TransferTableDram2Smu.
1738 */
1739 ret = smu_write_pptable(smu);
1740 if (ret) {
1741 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1742 return ret;
1743 }
1744 }
1745
1746 /* issue Run*Btc msg */
1747 ret = smu_run_btc(smu);
1748 if (ret)
1749 return ret;
1750
1751 /* Enable UclkShadow on wbrf supported */
1752 if (smu->wbrf_supported) {
1753 ret = smu_enable_uclk_shadow(smu, true);
1754 if (ret) {
1755 dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1756 return ret;
1757 }
1758 }
1759
1760 /*
1761 * With SCPM enabled, these actions (and the relevant messages) are
1762 * neither needed nor permitted.
1763 */
1764 if (!adev->scpm_enabled) {
1765 ret = smu_feature_set_allowed_mask(smu);
1766 if (ret) {
1767 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1768 return ret;
1769 }
1770 }
1771
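/* Map the highest PCIe speed advertised in pcie_gen_mask to the gen index passed to the SMU (0 = GEN1 ... 4 = GEN5). */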
1772 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
1773 pcie_gen = 4;
1774 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1775 pcie_gen = 3;
1776 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1777 pcie_gen = 2;
1778 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1779 pcie_gen = 1;
1780 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1781 pcie_gen = 0;
1782
1783 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1784 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1785 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
1786 */
1787 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
1788 pcie_width = 7;
1789 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1790 pcie_width = 6;
1791 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1792 pcie_width = 5;
1793 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1794 pcie_width = 4;
1795 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1796 pcie_width = 3;
1797 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1798 pcie_width = 2;
1799 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1800 pcie_width = 1;
1801 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1802 if (ret) {
1803 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1804 return ret;
1805 }
1806
1807 ret = smu_system_features_control(smu, true);
1808 if (ret) {
1809 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1810 return ret;
1811 }
1812
1813 smu_init_xgmi_plpd_mode(smu);
1814
1815 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1816 if (ret) {
1817 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1818 return ret;
1819 }
1820 bitmap_copy(feature->supported,
1821 (unsigned long *)&features_supported,
1822 feature->feature_num);
1823
1824 if (!smu_is_dpm_running(smu))
1825 dev_info(adev->dev, "dpm has been disabled\n");
1826
1827 /*
1828 * Set initial values (retrieved from vbios) in the dpm tables context,
1829 * such as gfxclk, memclk, dcefclk, etc., and enable the DPM feature
1830 * for each type of clock.
1831 */
1832 ret = smu_set_default_dpm_table(smu);
1833 if (ret) {
1834 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1835 return ret;
1836 }
1837
1838 ret = smu_get_thermal_temperature_range(smu);
1839 if (ret) {
1840 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1841 return ret;
1842 }
1843
1844 ret = smu_enable_thermal_alert(smu);
1845 if (ret) {
1846 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1847 return ret;
1848 }
1849
1850 ret = smu_notify_display_change(smu);
1851 if (ret) {
1852 dev_err(adev->dev, "Failed to notify display change!\n");
1853 return ret;
1854 }
1855
1856 /*
1857 * Set min deep sleep dcefclk with the bootup value from vbios via
1858 * SetMinDeepSleepDcefclk MSG.
1859 */
1860 ret = smu_set_min_dcef_deep_sleep(smu,
1861 smu->smu_table.boot_values.dcefclk / 100);
1862 if (ret) {
1863 dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1864 return ret;
1865 }
1866
1867 /* Init wbrf support. Properly set up the notifier */
1868 ret = smu_wbrf_init(smu);
1869 if (ret)
1870 dev_err(adev->dev, "Error during wbrf init call\n");
1871
1872 return ret;
1873 }
1874
1875 static int smu_start_smc_engine(struct smu_context *smu)
1876 {
1877 struct amdgpu_device *adev = smu->adev;
1878 int ret = 0;
1879
1880 if (amdgpu_virt_xgmi_migrate_enabled(adev))
1881 smu_update_gpu_addresses(smu);
1882
1883 smu->smc_fw_state = SMU_FW_INIT;
1884
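/*
 * With direct (non-PSP) firmware loading on pre-MP1-v11 parts, the
 * driver itself loads the SMC microcode before talking to the SMU.
 */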
1885 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1886 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1887 if (smu->ppt_funcs->load_microcode) {
1888 ret = smu->ppt_funcs->load_microcode(smu);
1889 if (ret)
1890 return ret;
1891 }
1892 }
1893 }
1894
1895 if (smu->ppt_funcs->check_fw_status) {
1896 ret = smu->ppt_funcs->check_fw_status(smu);
1897 if (ret) {
1898 dev_err(adev->dev, "SMC is not ready\n");
1899 return ret;
1900 }
1901 }
1902
1903 /*
1904 * Send the GetDriverIfVersion message to check whether the return value
1905 * matches the DRIVER_IF_VERSION in the smc header.
1906 */
1907 ret = smu_check_fw_version(smu);
1908 if (ret)
1909 return ret;
1910
1911 return ret;
1912 }
1913
1914 static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1915 {
1916 int i, ret;
1917 struct amdgpu_device *adev = ip_block->adev;
1918 struct smu_context *smu = adev->powerplay.pp_handle;
1919
1920 if (amdgpu_sriov_multi_vf_mode(adev)) {
1921 smu->pm_enabled = false;
1922 return 0;
1923 }
1924
1925 ret = smu_start_smc_engine(smu);
1926 if (ret) {
1927 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1928 return ret;
1929 }
1930
1931 /*
1932 * Check whether wbrf is supported. This needs to be done
1933 * before SMU setup starts since part of SMU configuration
1934 * relies on this.
1935 */
1936 smu_wbrf_support_check(smu);
1937
1938 if (smu->is_apu) {
1939 ret = smu_set_gfx_imu_enable(smu);
1940 if (ret)
1941 return ret;
1942 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1943 smu_dpm_set_vcn_enable(smu, true, i);
1944 smu_dpm_set_jpeg_enable(smu, true);
1945 smu_dpm_set_umsch_mm_enable(smu, true);
1946 smu_set_mall_enable(smu);
1947 smu_set_gfx_cgpg(smu, true);
1948 }
1949
1950 if (!smu->pm_enabled)
1951 return 0;
1952
1953 ret = smu_get_driver_allowed_feature_mask(smu);
1954 if (ret)
1955 return ret;
1956
1957 ret = smu_smc_hw_setup(smu);
1958 if (ret) {
1959 dev_err(adev->dev, "Failed to setup smc hw!\n");
1960 return ret;
1961 }
1962
1963 /*
1964 * Move maximum sustainable clock retrieval here considering:
1965 * 1. It is not needed on resume (from S3).
1966 * 2. DAL settings come between .hw_init and .late_init of SMU,
1967 * and DAL needs to know the maximum sustainable clocks. Thus
1968 * it cannot be put in .late_init().
1969 */
1970 ret = smu_init_max_sustainable_clocks(smu);
1971 if (ret) {
1972 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1973 return ret;
1974 }
1975
1976 adev->pm.dpm_enabled = true;
1977
1978 dev_info(adev->dev, "SMU is initialized successfully!\n");
1979
1980 return 0;
1981 }
1982
1983 static int smu_disable_dpms(struct smu_context *smu)
1984 {
1985 struct amdgpu_device *adev = smu->adev;
1986 int ret = 0;
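/*
 * BACO is in use when a dGPU is reset via BACO, or when it enters
 * runtime PM or hibernation (S4) and the ASIC supports BACO.
 */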
1987 bool use_baco = !smu->is_apu &&
1988 ((amdgpu_in_reset(adev) &&
1989 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1990 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1991
1992 /*
1993 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
1994 * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1995 */
1996 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1997 case IP_VERSION(13, 0, 0):
1998 case IP_VERSION(13, 0, 7):
1999 case IP_VERSION(13, 0, 10):
2000 case IP_VERSION(14, 0, 2):
2001 case IP_VERSION(14, 0, 3):
2002 return 0;
2003 default:
2004 break;
2005 }
2006
2007 /*
2008 * For custom pptable uploading, skip the DPM features
2009 * disable process on Navi1x ASICs.
2010 * - The gfx related features are under the control of
2011 * RLC on those ASICs. RLC reinitialization would be
2012 * needed to re-enable them, which would cost much
2013 * more effort.
2014 *
2015 * - SMU firmware can handle the DPM reenablement
2016 * properly.
2017 */
2018 if (smu->uploading_custom_pp_table) {
2019 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2020 case IP_VERSION(11, 0, 0):
2021 case IP_VERSION(11, 0, 5):
2022 case IP_VERSION(11, 0, 9):
2023 case IP_VERSION(11, 0, 7):
2024 case IP_VERSION(11, 0, 11):
2025 case IP_VERSION(11, 5, 0):
2026 case IP_VERSION(11, 5, 2):
2027 case IP_VERSION(11, 0, 12):
2028 case IP_VERSION(11, 0, 13):
2029 return 0;
2030 default:
2031 break;
2032 }
2033 }
2034
2035 /*
2036 * For Sienna_Cichlid, PMFW will handle the feature disablement properly
2037 * on BACO entry. Driver involvement is unnecessary.
2038 */
2039 if (use_baco) {
2040 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2041 case IP_VERSION(11, 0, 7):
2042 case IP_VERSION(11, 0, 0):
2043 case IP_VERSION(11, 0, 5):
2044 case IP_VERSION(11, 0, 9):
2045 case IP_VERSION(13, 0, 7):
2046 return 0;
2047 default:
2048 break;
2049 }
2050 }
2051
2052 /*
2053 * For GFX11 and subsequent APUs, PMFW will handle the feature disablement properly
2054 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
2055 */
2056 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
2057 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
2058 return 0;
2059
2060 /* vangogh s0ix */
2061 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
2062 amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
2063 adev->in_s0ix)
2064 return 0;
2065
2066 /*
2067 * For gpu reset, runpm and hibernation through BACO,
2068 * BACO feature has to be kept enabled.
2069 */
2070 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
2071 ret = smu_disable_all_features_with_exception(smu,
2072 SMU_FEATURE_BACO_BIT);
2073 if (ret)
2074 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
2075 } else {
2076 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
2077 if (!adev->scpm_enabled) {
2078 ret = smu_system_features_control(smu, false);
2079 if (ret)
2080 dev_err(adev->dev, "Failed to disable smu features.\n");
2081 }
2082 }
2083
2084 /* Notify SMU that RLC is going to be off and stop the RLC/SMU interaction;
2085 * otherwise SMU will hang while interacting with RLC once RLC is halted.
2086 * This is a workaround for the Vangogh ASIC that fixes the SMU hang issue.
2087 */
2088 ret = smu_notify_rlc_state(smu, false);
2089 if (ret) {
2090 dev_err(adev->dev, "Failed to notify rlc status!\n");
2091 return ret;
2092 }
2093
2094 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
2095 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
2096 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
2097 adev->gfx.rlc.funcs->stop(adev);
2098
2099 return ret;
2100 }
2101
2102 static int smu_smc_hw_cleanup(struct smu_context *smu)
2103 {
2104 struct amdgpu_device *adev = smu->adev;
2105 int ret = 0;
2106
2107 smu_wbrf_fini(smu);
2108
2109 cancel_work_sync(&smu->throttling_logging_work);
2110 cancel_work_sync(&smu->interrupt_work);
2111
2112 ret = smu_disable_thermal_alert(smu);
2113 if (ret) {
2114 dev_err(adev->dev, "Failed to disable thermal alert!\n");
2115 return ret;
2116 }
2117
2118 cancel_delayed_work_sync(&smu->swctf_delayed_work);
2119
2120 ret = smu_disable_dpms(smu);
2121 if (ret) {
2122 dev_err(adev->dev, "Failed to disable dpm features!\n");
2123 return ret;
2124 }
2125
2126 return 0;
2127 }
2128
2129 static int smu_reset_mp1_state(struct smu_context *smu)
2130 {
2131 struct amdgpu_device *adev = smu->adev;
2132 int ret = 0;
2133
2134 if ((!adev->in_runpm) && (!adev->in_suspend) &&
2135 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2136 IP_VERSION(13, 0, 10) &&
2137 !amdgpu_device_has_display_hardware(adev))
2138 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2139
2140 return ret;
2141 }
2142
2143 static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2144 {
2145 struct amdgpu_device *adev = ip_block->adev;
2146 struct smu_context *smu = adev->powerplay.pp_handle;
2147 int i, ret;
2148
2149 if (amdgpu_sriov_multi_vf_mode(adev))
2150 return 0;
2151
2152 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2153 smu_dpm_set_vcn_enable(smu, false, i);
2154 adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
2155 }
2156 smu_dpm_set_jpeg_enable(smu, false);
2157 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2158 smu_dpm_set_umsch_mm_enable(smu, false);
2159
2160 if (!smu->pm_enabled)
2161 return 0;
2162
2163 adev->pm.dpm_enabled = false;
2164
2165 ret = smu_smc_hw_cleanup(smu);
2166 if (ret)
2167 return ret;
2168
2169 ret = smu_reset_mp1_state(smu);
2170 if (ret)
2171 return ret;
2172
2173 return 0;
2174 }
2175
2176 static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2177 {
2178 struct amdgpu_device *adev = ip_block->adev;
2179 struct smu_context *smu = adev->powerplay.pp_handle;
2180
2181 kfree(smu);
2182 }
2183
2184 static int smu_reset(struct smu_context *smu)
2185 {
2186 struct amdgpu_device *adev = smu->adev;
2187 struct amdgpu_ip_block *ip_block;
2188 int ret;
2189
2190 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2191 if (!ip_block)
2192 return -EINVAL;
2193
2194 ret = smu_hw_fini(ip_block);
2195 if (ret)
2196 return ret;
2197
2198 ret = smu_hw_init(ip_block);
2199 if (ret)
2200 return ret;
2201
2202 ret = smu_late_init(ip_block);
2203 if (ret)
2204 return ret;
2205
2206 return 0;
2207 }
2208
2209 static int smu_suspend(struct amdgpu_ip_block *ip_block)
2210 {
2211 struct amdgpu_device *adev = ip_block->adev;
2212 struct smu_context *smu = adev->powerplay.pp_handle;
2213 int ret;
2214 uint64_t count;
2215
2216 if (amdgpu_sriov_multi_vf_mode(adev))
2217 return 0;
2218
2219 if (!smu->pm_enabled)
2220 return 0;
2221
2222 adev->pm.dpm_enabled = false;
2223
2224 ret = smu_smc_hw_cleanup(smu);
2225 if (ret)
2226 return ret;
2227
2228 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2229
2230 smu_set_gfx_cgpg(smu, false);
2231
2232 /*
2233 * pmfw resets the entrycount when the device is suspended, so we save
2234 * the last value to be reused on resume to keep it consistent
2235 */
2236 ret = smu_get_entrycount_gfxoff(smu, &count);
2237 if (!ret)
2238 adev->gfx.gfx_off_entrycount = count;
2239
2240 /* clear this on suspend so it will get reprogrammed on resume */
2241 smu->workload_mask = 0;
2242
2243 return 0;
2244 }
2245
2246 static int smu_resume(struct amdgpu_ip_block *ip_block)
2247 {
2248 int ret;
2249 struct amdgpu_device *adev = ip_block->adev;
2250 struct smu_context *smu = adev->powerplay.pp_handle;
2251
2252 if (amdgpu_sriov_multi_vf_mode(adev))
2253 return 0;
2254
2255 if (!smu->pm_enabled)
2256 return 0;
2257
2258 dev_info(adev->dev, "SMU is resuming...\n");
2259
2260 ret = smu_start_smc_engine(smu);
2261 if (ret) {
2262 dev_err(adev->dev, "SMC engine is not correctly up!\n");
2263 return ret;
2264 }
2265
2266 ret = smu_smc_hw_setup(smu);
2267 if (ret) {
2268 dev_err(adev->dev, "Failed to setup smc hw!\n");
2269 return ret;
2270 }
2271
2272 ret = smu_set_gfx_imu_enable(smu);
2273 if (ret)
2274 return ret;
2275
2276 smu_set_gfx_cgpg(smu, true);
2277
2278 smu->disable_uclk_switch = 0;
2279
2280 adev->pm.dpm_enabled = true;
2281
2282 dev_info(adev->dev, "SMU is resumed successfully!\n");
2283
2284 return 0;
2285 }
2286
2287 static int smu_display_configuration_change(void *handle,
2288 const struct amd_pp_display_configuration *display_config)
2289 {
2290 struct smu_context *smu = handle;
2291
2292 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2293 return -EOPNOTSUPP;
2294
2295 if (!display_config)
2296 return -EINVAL;
2297
2298 smu_set_min_dcef_deep_sleep(smu,
2299 display_config->min_dcef_deep_sleep_set_clk / 100);
2300
2301 return 0;
2302 }
2303
2304 static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2305 enum amd_clockgating_state state)
2306 {
2307 return 0;
2308 }
2309
2310 static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
2311 enum amd_powergating_state state)
2312 {
2313 return 0;
2314 }
2315
2316 static int smu_enable_umd_pstate(void *handle,
2317 enum amd_dpm_forced_level *level)
2318 {
2319 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2320 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2321 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2322 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2323
2324 struct smu_context *smu = (struct smu_context*)(handle);
2325 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2326
2327 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2328 return -EINVAL;
2329
2330 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2331 /* enter umd pstate, save current level, disable gfx cg */
2332 if (*level & profile_mode_mask) {
2333 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2334 smu_gpo_control(smu, false);
2335 smu_gfx_ulv_control(smu, false);
2336 smu_deep_sleep_control(smu, false);
2337 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2338 }
2339 } else {
2340 /* exit umd pstate, restore level, enable gfx cg */
2341 if (!(*level & profile_mode_mask)) {
2342 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2343 *level = smu_dpm_ctx->saved_dpm_level;
2344 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2345 smu_deep_sleep_control(smu, true);
2346 smu_gfx_ulv_control(smu, true);
2347 smu_gpo_control(smu, true);
2348 }
2349 }
2350
2351 return 0;
2352 }
2353
2354 static int smu_bump_power_profile_mode(struct smu_context *smu,
2355 long *custom_params,
2356 u32 custom_params_max_idx)
2357 {
2358 u32 workload_mask = 0;
2359 int i, ret = 0;
2360
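/*
 * Build the workload mask from the per-profile refcounts: a profile
 * stays selected for as long as at least one user holds a reference.
 */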
2361 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2362 if (smu->workload_refcount[i])
2363 workload_mask |= 1 << i;
2364 }
2365
2366 if (smu->workload_mask == workload_mask)
2367 return 0;
2368
2369 if (smu->ppt_funcs->set_power_profile_mode)
2370 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2371 custom_params,
2372 custom_params_max_idx);
2373
2374 if (!ret)
2375 smu->workload_mask = workload_mask;
2376
2377 return ret;
2378 }
2379
2380 static void smu_power_profile_mode_get(struct smu_context *smu,
2381 enum PP_SMC_POWER_PROFILE profile_mode)
2382 {
2383 smu->workload_refcount[profile_mode]++;
2384 }
2385
2386 static void smu_power_profile_mode_put(struct smu_context *smu,
2387 enum PP_SMC_POWER_PROFILE profile_mode)
2388 {
2389 if (smu->workload_refcount[profile_mode])
2390 smu->workload_refcount[profile_mode]--;
2391 }
2392
2393 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2394 enum amd_dpm_forced_level level,
2395 bool skip_display_settings)
2396 {
2397 int ret = 0;
2398 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2399
2400 if (!skip_display_settings) {
2401 ret = smu_display_config_changed(smu);
2402 if (ret) {
2403 dev_err(smu->adev->dev, "Failed to change display config!");
2404 return ret;
2405 }
2406 }
2407
2408 ret = smu_apply_clocks_adjust_rules(smu);
2409 if (ret) {
2410 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2411 return ret;
2412 }
2413
2414 if (!skip_display_settings) {
2415 ret = smu_notify_smc_display_config(smu);
2416 if (ret) {
2417 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2418 return ret;
2419 }
2420 }
2421
2422 if (smu_dpm_ctx->dpm_level != level) {
2423 ret = smu_asic_set_performance_level(smu, level);
2424 if (ret) {
2425 if (ret == -EOPNOTSUPP)
2426 dev_info(smu->adev->dev, "set performance level %d not supported",
2427 level);
2428 else
2429 dev_err(smu->adev->dev, "Failed to set performance level %d",
2430 level);
2431 return ret;
2432 }
2433
2434 /* update the saved copy */
2435 smu_dpm_ctx->dpm_level = level;
2436 }
2437
2438 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2439 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2440 smu_bump_power_profile_mode(smu, NULL, 0);
2441
2442 return ret;
2443 }
2444
2445 static int smu_handle_task(struct smu_context *smu,
2446 enum amd_dpm_forced_level level,
2447 enum amd_pp_task task_id)
2448 {
2449 int ret = 0;
2450
2451 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2452 return -EOPNOTSUPP;
2453
2454 switch (task_id) {
2455 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2456 ret = smu_pre_display_config_changed(smu);
2457 if (ret)
2458 return ret;
2459 ret = smu_adjust_power_state_dynamic(smu, level, false);
2460 break;
2461 case AMD_PP_TASK_COMPLETE_INIT:
2462 ret = smu_adjust_power_state_dynamic(smu, level, true);
2463 break;
2464 case AMD_PP_TASK_READJUST_POWER_STATE:
2465 ret = smu_adjust_power_state_dynamic(smu, level, true);
2466 break;
2467 default:
2468 break;
2469 }
2470
2471 return ret;
2472 }
2473
2474 static int smu_handle_dpm_task(void *handle,
2475 enum amd_pp_task task_id,
2476 enum amd_pm_state_type *user_state)
2477 {
2478 struct smu_context *smu = handle;
2479 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2480
2481 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2482
2483 }
2484
2485 static int smu_switch_power_profile(void *handle,
2486 enum PP_SMC_POWER_PROFILE type,
2487 bool enable)
2488 {
2489 struct smu_context *smu = handle;
2490 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2491 int ret;
2492
2493 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2494 return -EOPNOTSUPP;
2495
2496 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
2497 return -EINVAL;
2498
2499 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2500 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2501 if (enable)
2502 smu_power_profile_mode_get(smu, type);
2503 else
2504 smu_power_profile_mode_put(smu, type);
2505 /* don't switch the active workload when paused */
2506 if (smu->pause_workload)
2507 ret = 0;
2508 else
2509 ret = smu_bump_power_profile_mode(smu, NULL, 0);
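/* roll back the refcount update if the new mask could not be applied */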
2510 if (ret) {
2511 if (enable)
2512 smu_power_profile_mode_put(smu, type);
2513 else
2514 smu_power_profile_mode_get(smu, type);
2515 return ret;
2516 }
2517 }
2518
2519 return 0;
2520 }
2521
2522 static int smu_pause_power_profile(void *handle,
2523 bool pause)
2524 {
2525 struct smu_context *smu = handle;
2526 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2527 u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
2528 int ret;
2529
2530 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2531 return -EOPNOTSUPP;
2532
2533 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2534 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2535 smu->pause_workload = pause;
2536
2537 /* force to bootup default profile */
2538 if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
2539 ret = smu->ppt_funcs->set_power_profile_mode(smu,
2540 workload_mask,
2541 NULL,
2542 0);
2543 else
2544 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2545 return ret;
2546 }
2547
2548 return 0;
2549 }
2550
2551 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2552 {
2553 struct smu_context *smu = handle;
2554 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2555
2556 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2557 return -EOPNOTSUPP;
2558
2559 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2560 return -EINVAL;
2561
2562 return smu_dpm_ctx->dpm_level;
2563 }
2564
2565 static int smu_force_performance_level(void *handle,
2566 enum amd_dpm_forced_level level)
2567 {
2568 struct smu_context *smu = handle;
2569 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2570 int ret = 0;
2571
2572 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2573 return -EOPNOTSUPP;
2574
2575 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2576 return -EINVAL;
2577
2578 ret = smu_enable_umd_pstate(smu, &level);
2579 if (ret)
2580 return ret;
2581
2582 ret = smu_handle_task(smu, level,
2583 AMD_PP_TASK_READJUST_POWER_STATE);
2584
2585 /* reset user dpm clock state */
2586 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2587 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2588 smu->user_dpm_profile.clk_dependency = 0;
2589 }
2590
2591 return ret;
2592 }
2593
2594 static int smu_set_display_count(void *handle, uint32_t count)
2595 {
2596 struct smu_context *smu = handle;
2597
2598 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2599 return -EOPNOTSUPP;
2600
2601 return smu_init_display_count(smu, count);
2602 }
2603
2604 static int smu_force_smuclk_levels(struct smu_context *smu,
2605 enum smu_clk_type clk_type,
2606 uint32_t mask)
2607 {
2608 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2609 int ret = 0;
2610
2611 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2612 return -EOPNOTSUPP;
2613
2614 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2615 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2616 return -EINVAL;
2617 }
2618
2619 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2620 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2621 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2622 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2623 smu_set_user_clk_dependencies(smu, clk_type);
2624 }
2625 }
2626
2627 return ret;
2628 }
2629
2630 static int smu_force_ppclk_levels(void *handle,
2631 enum pp_clock_type type,
2632 uint32_t mask)
2633 {
2634 struct smu_context *smu = handle;
2635 enum smu_clk_type clk_type;
2636
2637 switch (type) {
2638 case PP_SCLK:
2639 clk_type = SMU_SCLK; break;
2640 case PP_MCLK:
2641 clk_type = SMU_MCLK; break;
2642 case PP_PCIE:
2643 clk_type = SMU_PCIE; break;
2644 case PP_SOCCLK:
2645 clk_type = SMU_SOCCLK; break;
2646 case PP_FCLK:
2647 clk_type = SMU_FCLK; break;
2648 case PP_DCEFCLK:
2649 clk_type = SMU_DCEFCLK; break;
2650 case PP_VCLK:
2651 clk_type = SMU_VCLK; break;
2652 case PP_VCLK1:
2653 clk_type = SMU_VCLK1; break;
2654 case PP_DCLK:
2655 clk_type = SMU_DCLK; break;
2656 case PP_DCLK1:
2657 clk_type = SMU_DCLK1; break;
2658 case OD_SCLK:
2659 clk_type = SMU_OD_SCLK; break;
2660 case OD_MCLK:
2661 clk_type = SMU_OD_MCLK; break;
2662 case OD_VDDC_CURVE:
2663 clk_type = SMU_OD_VDDC_CURVE; break;
2664 case OD_RANGE:
2665 clk_type = SMU_OD_RANGE; break;
2666 default:
2667 return -EINVAL;
2668 }
2669
2670 return smu_force_smuclk_levels(smu, clk_type, mask);
2671 }
2672
2673 /*
2674 * On system suspend or reset, the dpm_enabled
2675 * flag will be cleared, so that the SMU services which
2676 * are not supported will be gated.
2677 * However, setting the mp1 state should still be allowed
2678 * even if dpm_enabled is cleared.
2679 */
2680 static int smu_set_mp1_state(void *handle,
2681 enum pp_mp1_state mp1_state)
2682 {
2683 struct smu_context *smu = handle;
2684 int ret = 0;
2685
2686 if (!smu->pm_enabled)
2687 return -EOPNOTSUPP;
2688
2689 if (smu->ppt_funcs &&
2690 smu->ppt_funcs->set_mp1_state)
2691 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2692
2693 return ret;
2694 }
2695
2696 static int smu_set_df_cstate(void *handle,
2697 enum pp_df_cstate state)
2698 {
2699 struct smu_context *smu = handle;
2700 int ret = 0;
2701
2702 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2703 return -EOPNOTSUPP;
2704
2705 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2706 return 0;
2707
2708 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2709 if (ret)
2710 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2711
2712 return ret;
2713 }
2714
2715 int smu_write_watermarks_table(struct smu_context *smu)
2716 {
2717 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2718 return -EOPNOTSUPP;
2719
2720 return smu_set_watermarks_table(smu, NULL);
2721 }
2722
2723 static int smu_set_watermarks_for_clock_ranges(void *handle,
2724 struct pp_smu_wm_range_sets *clock_ranges)
2725 {
2726 struct smu_context *smu = handle;
2727
2728 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2729 return -EOPNOTSUPP;
2730
2731 if (smu->disable_watermark)
2732 return 0;
2733
2734 return smu_set_watermarks_table(smu, clock_ranges);
2735 }
2736
2737 int smu_set_ac_dc(struct smu_context *smu)
2738 {
2739 int ret = 0;
2740
2741 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2742 return -EOPNOTSUPP;
2743
2744 /* controlled by firmware */
2745 if (smu->dc_controlled_by_gpio)
2746 return 0;
2747
2748 ret = smu_set_power_source(smu,
2749 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2750 SMU_POWER_SOURCE_DC);
2751 if (ret)
2752 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2753 smu->adev->pm.ac_power ? "AC" : "DC");
2754
2755 return ret;
2756 }
2757
2758 const struct amd_ip_funcs smu_ip_funcs = {
2759 .name = "smu",
2760 .early_init = smu_early_init,
2761 .late_init = smu_late_init,
2762 .sw_init = smu_sw_init,
2763 .sw_fini = smu_sw_fini,
2764 .hw_init = smu_hw_init,
2765 .hw_fini = smu_hw_fini,
2766 .late_fini = smu_late_fini,
2767 .suspend = smu_suspend,
2768 .resume = smu_resume,
2769 .is_idle = NULL,
2770 .check_soft_reset = NULL,
2771 .wait_for_idle = NULL,
2772 .soft_reset = NULL,
2773 .set_clockgating_state = smu_set_clockgating_state,
2774 .set_powergating_state = smu_set_powergating_state,
2775 };
2776
2777 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2778 .type = AMD_IP_BLOCK_TYPE_SMC,
2779 .major = 11,
2780 .minor = 0,
2781 .rev = 0,
2782 .funcs = &smu_ip_funcs,
2783 };
2784
2785 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2786 .type = AMD_IP_BLOCK_TYPE_SMC,
2787 .major = 12,
2788 .minor = 0,
2789 .rev = 0,
2790 .funcs = &smu_ip_funcs,
2791 };
2792
2793 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2794 .type = AMD_IP_BLOCK_TYPE_SMC,
2795 .major = 13,
2796 .minor = 0,
2797 .rev = 0,
2798 .funcs = &smu_ip_funcs,
2799 };
2800
2801 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2802 .type = AMD_IP_BLOCK_TYPE_SMC,
2803 .major = 14,
2804 .minor = 0,
2805 .rev = 0,
2806 .funcs = &smu_ip_funcs,
2807 };
2808
2809 const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle)
2810 {
2811 struct smu_context *smu = (struct smu_context *)handle;
2812 const struct ras_smu_drv *tmp = NULL;
2813 int ret;
2814
2815 ret = smu_get_ras_smu_drv(smu, &tmp);
2816
2817 return ret ? NULL : tmp;
2818 }
2819
2820 static int smu_load_microcode(void *handle)
2821 {
2822 struct smu_context *smu = handle;
2823 struct amdgpu_device *adev = smu->adev;
2824 int ret = 0;
2825
2826 if (!smu->pm_enabled)
2827 return -EOPNOTSUPP;
2828
2829 /* This should be used for non-PSP loading */
2830 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2831 return 0;
2832
2833 if (smu->ppt_funcs->load_microcode) {
2834 ret = smu->ppt_funcs->load_microcode(smu);
2835 if (ret) {
2836 dev_err(adev->dev, "Load microcode failed\n");
2837 return ret;
2838 }
2839 }
2840
2841 if (smu->ppt_funcs->check_fw_status) {
2842 ret = smu->ppt_funcs->check_fw_status(smu);
2843 if (ret) {
2844 dev_err(adev->dev, "SMC is not ready\n");
2845 return ret;
2846 }
2847 }
2848
2849 return ret;
2850 }
2851
2852 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2853 {
2854 int ret = 0;
2855
2856 if (smu->ppt_funcs->set_gfx_cgpg)
2857 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2858
2859 return ret;
2860 }
2861
2862 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2863 {
2864 struct smu_context *smu = handle;
2865 int ret = 0;
2866
2867 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2868 return -EOPNOTSUPP;
2869
2870 if (!smu->ppt_funcs->set_fan_speed_rpm)
2871 return -EOPNOTSUPP;
2872
2873 if (speed == U32_MAX)
2874 return -EINVAL;
2875
2876 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2877 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2878 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2879 smu->user_dpm_profile.fan_speed_rpm = speed;
2880
2881 /* Override the custom PWM setting as the two cannot coexist */
2882 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2883 smu->user_dpm_profile.fan_speed_pwm = 0;
2884 }
2885
2886 return ret;
2887 }
2888
2889 /**
2890 * smu_get_power_limit - Request one of the SMU Power Limits
2891 *
2892 * @handle: pointer to smu context
2893 * @limit: requested limit is written back to this variable
2894 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
2895 * @pp_power_type: &pp_power_type the type of power limit being requested
2896 * Return: 0 on success, <0 on error
2897 *
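 * Example (hypothetical caller, for illustration only):
 *
 *   uint32_t limit;
 *   int err = smu_get_power_limit(adev->powerplay.pp_handle, &limit,
 *                                 PP_PWR_LIMIT_CURRENT,
 *                                 PP_PWR_TYPE_SUSTAINED);
 *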
2898 */
2899 int smu_get_power_limit(void *handle,
2900 uint32_t *limit,
2901 enum pp_power_limit_level pp_limit_level,
2902 enum pp_power_type pp_power_type)
2903 {
2904 struct smu_context *smu = handle;
2905 struct amdgpu_device *adev = smu->adev;
2906 enum smu_ppt_limit_level limit_level;
2907 uint32_t limit_type;
2908 int ret = 0;
2909
2910 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2911 return -EOPNOTSUPP;
2912
2913 if (!limit)
2914 return -EINVAL;
2915
2916 switch (pp_power_type) {
2917 case PP_PWR_TYPE_SUSTAINED:
2918 limit_type = SMU_DEFAULT_PPT_LIMIT;
2919 break;
2920 case PP_PWR_TYPE_FAST:
2921 limit_type = SMU_FAST_PPT_LIMIT;
2922 break;
2923 default:
2924 return -EOPNOTSUPP;
2925 }
2926
2927 switch (pp_limit_level) {
2928 case PP_PWR_LIMIT_CURRENT:
2929 limit_level = SMU_PPT_LIMIT_CURRENT;
2930 break;
2931 case PP_PWR_LIMIT_DEFAULT:
2932 limit_level = SMU_PPT_LIMIT_DEFAULT;
2933 break;
2934 case PP_PWR_LIMIT_MAX:
2935 limit_level = SMU_PPT_LIMIT_MAX;
2936 break;
2937 case PP_PWR_LIMIT_MIN:
2938 limit_level = SMU_PPT_LIMIT_MIN;
2939 break;
2940 default:
2941 return -EOPNOTSUPP;
2942 }
2943
2944 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2945 if (smu->ppt_funcs->get_ppt_limit)
2946 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2947 else
2948 return -EOPNOTSUPP;
2949 } else {
2950 switch (limit_level) {
2951 case SMU_PPT_LIMIT_CURRENT:
2952 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2953 case IP_VERSION(13, 0, 2):
2954 case IP_VERSION(13, 0, 6):
2955 case IP_VERSION(13, 0, 12):
2956 case IP_VERSION(13, 0, 14):
2957 case IP_VERSION(11, 0, 7):
2958 case IP_VERSION(11, 0, 11):
2959 case IP_VERSION(11, 0, 12):
2960 case IP_VERSION(11, 0, 13):
2961 ret = smu_get_asic_power_limits(smu,
2962 &smu->current_power_limit,
2963 NULL, NULL, NULL);
2964 break;
2965 default:
2966 break;
2967 }
2968 *limit = smu->current_power_limit;
2969 break;
2970 case SMU_PPT_LIMIT_DEFAULT:
2971 *limit = smu->default_power_limit;
2972 break;
2973 case SMU_PPT_LIMIT_MAX:
2974 *limit = smu->max_power_limit;
2975 break;
2976 case SMU_PPT_LIMIT_MIN:
2977 *limit = smu->min_power_limit;
2978 break;
2979 default:
2980 return -EINVAL;
2981 }
2982 }
2983
2984 return ret;
2985 }
2986
2987 static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
2988 {
2989 struct smu_context *smu = handle;
2990 int ret = 0;
2991
2992 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2993 return -EOPNOTSUPP;
2994
2995 if (limit_type == SMU_DEFAULT_PPT_LIMIT) {
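/* a limit of zero means "reapply the current power limit" */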
2996 if (!limit)
2997 limit = smu->current_power_limit;
2998 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2999 dev_err(smu->adev->dev,
3000 "New power limit (%d) is out of range [%d,%d]\n",
3001 limit, smu->min_power_limit, smu->max_power_limit);
3002 return -EINVAL;
3003 }
3004 }
3005
3006 if (smu->ppt_funcs->set_power_limit) {
3007 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
3008 if (ret)
3009 return ret;
3010 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
3011 smu->user_dpm_profile.power_limits[limit_type] = limit;
3012 }
3013
3014 return 0;
3015 }
3016
3017 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
3018 {
3019 int ret = 0;
3020
3021 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3022 return -EOPNOTSUPP;
3023
3024 if (smu->ppt_funcs->print_clk_levels)
3025 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
3026
3027 return ret;
3028 }
3029
3030 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
3031 {
3032 enum smu_clk_type clk_type;
3033
3034 switch (type) {
3035 case PP_SCLK:
3036 clk_type = SMU_SCLK; break;
3037 case PP_MCLK:
3038 clk_type = SMU_MCLK; break;
3039 case PP_PCIE:
3040 clk_type = SMU_PCIE; break;
3041 case PP_SOCCLK:
3042 clk_type = SMU_SOCCLK; break;
3043 case PP_FCLK:
3044 clk_type = SMU_FCLK; break;
3045 case PP_DCEFCLK:
3046 clk_type = SMU_DCEFCLK; break;
3047 case PP_VCLK:
3048 clk_type = SMU_VCLK; break;
3049 case PP_VCLK1:
3050 clk_type = SMU_VCLK1; break;
3051 case PP_DCLK:
3052 clk_type = SMU_DCLK; break;
3053 case PP_DCLK1:
3054 clk_type = SMU_DCLK1; break;
3055 case PP_ISPICLK:
3056 clk_type = SMU_ISPICLK;
3057 break;
3058 case PP_ISPXCLK:
3059 clk_type = SMU_ISPXCLK;
3060 break;
3061 case OD_SCLK:
3062 clk_type = SMU_OD_SCLK; break;
3063 case OD_MCLK:
3064 clk_type = SMU_OD_MCLK; break;
3065 case OD_VDDC_CURVE:
3066 clk_type = SMU_OD_VDDC_CURVE; break;
3067 case OD_RANGE:
3068 clk_type = SMU_OD_RANGE; break;
3069 case OD_VDDGFX_OFFSET:
3070 clk_type = SMU_OD_VDDGFX_OFFSET; break;
3071 case OD_CCLK:
3072 clk_type = SMU_OD_CCLK; break;
3073 case OD_FAN_CURVE:
3074 clk_type = SMU_OD_FAN_CURVE; break;
3075 case OD_ACOUSTIC_LIMIT:
3076 clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
3077 case OD_ACOUSTIC_TARGET:
3078 clk_type = SMU_OD_ACOUSTIC_TARGET; break;
3079 case OD_FAN_TARGET_TEMPERATURE:
3080 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
3081 case OD_FAN_MINIMUM_PWM:
3082 clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
3083 case OD_FAN_ZERO_RPM_ENABLE:
3084 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
3085 case OD_FAN_ZERO_RPM_STOP_TEMP:
3086 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
3087 default:
3088 clk_type = SMU_CLK_COUNT; break;
3089 }
3090
3091 return clk_type;
3092 }
3093
3094 static int smu_print_ppclk_levels(void *handle,
3095 enum pp_clock_type type,
3096 char *buf)
3097 {
3098 struct smu_context *smu = handle;
3099 enum smu_clk_type clk_type;
3100
3101 clk_type = smu_convert_to_smuclk(type);
3102 if (clk_type == SMU_CLK_COUNT)
3103 return -EINVAL;
3104
3105 return smu_print_smuclk_levels(smu, clk_type, buf);
3106 }
3107
3108 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
3109 {
3110 struct smu_context *smu = handle;
3111 enum smu_clk_type clk_type;
3112
3113 clk_type = smu_convert_to_smuclk(type);
3114 if (clk_type == SMU_CLK_COUNT)
3115 return -EINVAL;
3116
3117 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3118 return -EOPNOTSUPP;
3119
3120 if (!smu->ppt_funcs->emit_clk_levels)
3121 return -ENOENT;
3122
3123 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
3124
3125 }
3126
3127 static int smu_od_edit_dpm_table(void *handle,
3128 enum PP_OD_DPM_TABLE_COMMAND type,
3129 long *input, uint32_t size)
3130 {
3131 struct smu_context *smu = handle;
3132 int ret = 0;
3133
3134 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3135 return -EOPNOTSUPP;
3136
3137 if (smu->ppt_funcs->od_edit_dpm_table) {
3138 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
3139 }
3140
3141 return ret;
3142 }
3143
3144 static int smu_read_sensor(void *handle,
3145 int sensor,
3146 void *data,
3147 int *size_arg)
3148 {
3149 struct smu_context *smu = handle;
3150 struct amdgpu_device *adev = smu->adev;
3151 struct smu_umd_pstate_table *pstate_table =
3152 &smu->pstate_table;
3153 int i, ret = 0;
3154 uint32_t *size, size_val;
3155
3156 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3157 return -EOPNOTSUPP;
3158
3159 if (!data || !size_arg)
3160 return -EINVAL;
3161
3162 size_val = *size_arg;
3163 size = &size_val;
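/*
 * The ppt read_sensor callback takes a uint32_t size while the
 * powerplay interface uses an int, hence the local uint32_t copy.
 */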
3164
3165 if (smu->ppt_funcs->read_sensor)
3166 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
3167 goto unlock;
3168
3169 switch (sensor) {
3170 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3171 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3172 *size = 4;
3173 break;
3174 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3175 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3176 *size = 4;
3177 break;
3178 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3179 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3180 *size = 4;
3181 break;
3182 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3183 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3184 *size = 4;
3185 break;
3186 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3187 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3188 *size = 8;
3189 break;
3190 case AMDGPU_PP_SENSOR_UVD_POWER:
3191 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3192 *size = 4;
3193 break;
3194 case AMDGPU_PP_SENSOR_VCE_POWER:
3195 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3196 *size = 4;
3197 break;
3198 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3199 *(uint32_t *)data = 0;
3200 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
3201 if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
3202 *(uint32_t *)data = 1;
3203 break;
3204 }
3205 }
3206 *size = 4;
3207 break;
3208 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3209 *(uint32_t *)data = 0;
3210 *size = 4;
3211 break;
3212 default:
3213 *size = 0;
3214 ret = -EOPNOTSUPP;
3215 break;
3216 }
3217
3218 unlock:
3219 /* copy the possibly-updated uint32_t size back to the caller's int */
3220 *size_arg = size_val;
3221
3222 return ret;
3223 }
3224
3225 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
3226 {
3227 int ret = -EOPNOTSUPP;
3228 struct smu_context *smu = handle;
3229
3230 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3231 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3232
3233 return ret;
3234 }
3235
3236 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3237 {
3238 int ret = -EOPNOTSUPP;
3239 struct smu_context *smu = handle;
3240
3241 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3242 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3243
3244 return ret;
3245 }
3246
3247 static int smu_get_power_profile_mode(void *handle, char *buf)
3248 {
3249 struct smu_context *smu = handle;
3250
3251 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3252 !smu->ppt_funcs->get_power_profile_mode)
3253 return -EOPNOTSUPP;
3254 if (!buf)
3255 return -EINVAL;
3256
3257 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3258 }
3259
3260 static int smu_set_power_profile_mode(void *handle,
3261 long *param,
3262 uint32_t param_size)
3263 {
3264 struct smu_context *smu = handle;
3265 bool custom = false;
3266 int ret = 0;
3267
3268 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3269 !smu->ppt_funcs->set_power_profile_mode)
3270 return -EOPNOTSUPP;
3271
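/* the requested profile id is carried in the last element, param[param_size] */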
3272 if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
3273 custom = true;
3274 /* clear frontend mask so custom changes propagate */
3275 smu->workload_mask = 0;
3276 }
3277
3278 if ((param[param_size] != smu->power_profile_mode) || custom) {
3279 /* clear the old user preference */
3280 smu_power_profile_mode_put(smu, smu->power_profile_mode);
3281 /* set the new user preference */
3282 smu_power_profile_mode_get(smu, param[param_size]);
3283 ret = smu_bump_power_profile_mode(smu,
3284 custom ? param : NULL,
3285 custom ? param_size : 0);
3286 if (ret)
3287 smu_power_profile_mode_put(smu, param[param_size]);
3288 else
3289 /* store the user's preference */
3290 smu->power_profile_mode = param[param_size];
3291 }
3292
3293 return ret;
3294 }
3295
3296 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3297 {
3298 struct smu_context *smu = handle;
3299
3300 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3301 return -EOPNOTSUPP;
3302
3303 if (!smu->ppt_funcs->get_fan_control_mode)
3304 return -EOPNOTSUPP;
3305
3306 if (!fan_mode)
3307 return -EINVAL;
3308
3309 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3310
3311 return 0;
3312 }
3313
3314 static int smu_set_fan_control_mode(void *handle, u32 value)
3315 {
3316 struct smu_context *smu = handle;
3317 int ret = 0;
3318
3319 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3320 return -EOPNOTSUPP;
3321
3322 if (!smu->ppt_funcs->set_fan_control_mode)
3323 return -EOPNOTSUPP;
3324
3325 if (value == U32_MAX)
3326 return -EINVAL;
3327
3328 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3329 if (ret)
3330 goto out;
3331
3332 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3333 smu->user_dpm_profile.fan_mode = value;
3334
3335 /* reset user dpm fan speed */
3336 if (value != AMD_FAN_CTRL_MANUAL) {
3337 smu->user_dpm_profile.fan_speed_pwm = 0;
3338 smu->user_dpm_profile.fan_speed_rpm = 0;
3339 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3340 }
3341 }
3342
3343 out:
3344 return ret;
3345 }
3346
3347 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3348 {
3349 struct smu_context *smu = handle;
3350 int ret = 0;
3351
3352 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3353 return -EOPNOTSUPP;
3354
3355 if (!smu->ppt_funcs->get_fan_speed_pwm)
3356 return -EOPNOTSUPP;
3357
3358 if (!speed)
3359 return -EINVAL;
3360
3361 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3362
3363 return ret;
3364 }
3365
3366 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3367 {
3368 struct smu_context *smu = handle;
3369 int ret = 0;
3370
3371 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3372 return -EOPNOTSUPP;
3373
3374 if (!smu->ppt_funcs->set_fan_speed_pwm)
3375 return -EOPNOTSUPP;
3376
3377 if (speed == U32_MAX)
3378 return -EINVAL;
3379
3380 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3381 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3382 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3383 smu->user_dpm_profile.fan_speed_pwm = speed;
3384
3385 /* Override the custom RPM setting as the two cannot coexist */
3386 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3387 smu->user_dpm_profile.fan_speed_rpm = 0;
3388 }
3389
3390 return ret;
3391 }
3392
3393 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3394 {
3395 struct smu_context *smu = handle;
3396 int ret = 0;
3397
3398 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3399 return -EOPNOTSUPP;
3400
3401 if (!smu->ppt_funcs->get_fan_speed_rpm)
3402 return -EOPNOTSUPP;
3403
3404 if (!speed)
3405 return -EINVAL;
3406
3407 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3408
3409 return ret;
3410 }
3411
3412 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3413 {
3414 struct smu_context *smu = handle;
3415
3416 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3417 return -EOPNOTSUPP;
3418
3419 return smu_set_min_dcef_deep_sleep(smu, clk);
3420 }
3421
3422 static int smu_get_clock_by_type_with_latency(void *handle,
3423 enum amd_pp_clock_type type,
3424 struct pp_clock_levels_with_latency *clocks)
3425 {
3426 struct smu_context *smu = handle;
3427 enum smu_clk_type clk_type;
3428 int ret = 0;
3429
3430 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3431 return -EOPNOTSUPP;
3432
3433 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3434 switch (type) {
3435 case amd_pp_sys_clock:
3436 clk_type = SMU_GFXCLK;
3437 break;
3438 case amd_pp_mem_clock:
3439 clk_type = SMU_MCLK;
3440 break;
3441 case amd_pp_dcef_clock:
3442 clk_type = SMU_DCEFCLK;
3443 break;
3444 case amd_pp_disp_clock:
3445 clk_type = SMU_DISPCLK;
3446 break;
3447 default:
3448 dev_err(smu->adev->dev, "Invalid clock type!\n");
3449 return -EINVAL;
3450 }
3451
3452 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3453 }
3454
3455 return ret;
3456 }
3457
3458 static int smu_display_clock_voltage_request(void *handle,
3459 struct pp_display_clock_request *clock_req)
3460 {
3461 struct smu_context *smu = handle;
3462 int ret = 0;
3463
3464 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3465 return -EOPNOTSUPP;
3466
3467 if (smu->ppt_funcs->display_clock_voltage_request)
3468 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3469
3470 return ret;
3471 }
3472
3473
3474 static int smu_display_disable_memory_clock_switch(void *handle,
3475 bool disable_memory_clock_switch)
3476 {
3477 struct smu_context *smu = handle;
3478 int ret = -EINVAL;
3479
3480 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3481 return -EOPNOTSUPP;
3482
3483 if (smu->ppt_funcs->display_disable_memory_clock_switch)
3484 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3485
3486 return ret;
3487 }
3488
3489 static int smu_set_xgmi_pstate(void *handle,
3490 uint32_t pstate)
3491 {
3492 struct smu_context *smu = handle;
3493 int ret = 0;
3494
3495 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3496 return -EOPNOTSUPP;
3497
3498 if (smu->ppt_funcs->set_xgmi_pstate)
3499 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3500
3501 if (ret)
3502 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3503
3504 return ret;
3505 }
3506
3507 static int smu_get_baco_capability(void *handle)
3508 {
3509 struct smu_context *smu = handle;
3510
3511 if (!smu->pm_enabled)
3512 return false;
3513
3514 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3515 return false;
3516
3517 return smu->ppt_funcs->get_bamaco_support(smu);
3518 }
3519
3520 static int smu_baco_set_state(void *handle, int state)
3521 {
3522 struct smu_context *smu = handle;
3523 int ret = 0;
3524
3525 if (!smu->pm_enabled)
3526 return -EOPNOTSUPP;
3527
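/* state: 1 requests BACO entry, 0 requests BACO exit */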
3528 if (state == 0) {
3529 if (smu->ppt_funcs->baco_exit)
3530 ret = smu->ppt_funcs->baco_exit(smu);
3531 } else if (state == 1) {
3532 if (smu->ppt_funcs->baco_enter)
3533 ret = smu->ppt_funcs->baco_enter(smu);
3534 } else {
3535 return -EINVAL;
3536 }
3537
3538 if (ret)
3539 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3540 (state)?"enter":"exit");
3541
3542 return ret;
3543 }
3544
3545 bool smu_mode1_reset_is_support(struct smu_context *smu)
3546 {
3547 bool ret = false;
3548
3549 if (!smu->pm_enabled)
3550 return false;
3551
3552 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3553 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3554
3555 return ret;
3556 }
3557
3558 bool smu_link_reset_is_support(struct smu_context *smu)
3559 {
3560 if (!smu->pm_enabled)
3561 return false;
3562
3563 return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
3564 }
3565
3566 int smu_mode1_reset(struct smu_context *smu)
3567 {
3568 int ret = 0;
3569
3570 if (!smu->pm_enabled)
3571 return -EOPNOTSUPP;
3572
3573 if (smu->ppt_funcs->mode1_reset)
3574 ret = smu->ppt_funcs->mode1_reset(smu);
3575
3576 return ret;
3577 }
3578
3579 static int smu_mode2_reset(void *handle)
3580 {
3581 struct smu_context *smu = handle;
3582 int ret = 0;
3583
3584 if (!smu->pm_enabled)
3585 return -EOPNOTSUPP;
3586
3587 if (smu->ppt_funcs->mode2_reset)
3588 ret = smu->ppt_funcs->mode2_reset(smu);
3589
3590 if (ret)
3591 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3592
3593 return ret;
3594 }
3595
3596 int smu_link_reset(struct smu_context *smu)
3597 {
3598 int ret = 0;
3599
3600 if (!smu->pm_enabled)
3601 return -EOPNOTSUPP;
3602
3603 if (smu->ppt_funcs->link_reset)
3604 ret = smu->ppt_funcs->link_reset(smu);
3605
3606 return ret;
3607 }
3608
3609 static int smu_enable_gfx_features(void *handle)
3610 {
3611 struct smu_context *smu = handle;
3612 int ret = 0;
3613
3614 if (!smu->pm_enabled)
3615 return -EOPNOTSUPP;
3616
3617 if (smu->ppt_funcs->enable_gfx_features)
3618 ret = smu->ppt_funcs->enable_gfx_features(smu);
3619
3620 if (ret)
3621 dev_err(smu->adev->dev, "enable gfx features failed!\n");
3622
3623 return ret;
3624 }
3625
3626 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3627 struct pp_smu_nv_clock_table *max_clocks)
3628 {
3629 struct smu_context *smu = handle;
3630 int ret = 0;
3631
3632 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3633 return -EOPNOTSUPP;
3634
3635 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3636 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3637
3638 return ret;
3639 }
3640
3641 static int smu_get_uclk_dpm_states(void *handle,
3642 unsigned int *clock_values_in_khz,
3643 unsigned int *num_states)
3644 {
3645 struct smu_context *smu = handle;
3646 int ret = 0;
3647
3648 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3649 return -EOPNOTSUPP;
3650
3651 if (smu->ppt_funcs->get_uclk_dpm_states)
3652 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3653
3654 return ret;
3655 }
3656
3657 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3658 {
3659 struct smu_context *smu = handle;
3660 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3661
3662 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3663 return -EOPNOTSUPP;
3664
3665 if (smu->ppt_funcs->get_current_power_state)
3666 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3667
3668 return pm_state;
3669 }
3670
3671 static int smu_get_dpm_clock_table(void *handle,
3672 struct dpm_clocks *clock_table)
3673 {
3674 struct smu_context *smu = handle;
3675 int ret = 0;
3676
3677 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3678 return -EOPNOTSUPP;
3679
3680 if (smu->ppt_funcs->get_dpm_clock_table)
3681 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3682
3683 return ret;
3684 }
3685
3686 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3687 {
3688 struct smu_context *smu = handle;
3689
3690 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3691 return -EOPNOTSUPP;
3692
3693 if (!smu->ppt_funcs->get_gpu_metrics)
3694 return -EOPNOTSUPP;
3695
3696 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3697 }
3698
3699 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3700 size_t size)
3701 {
3702 struct smu_context *smu = handle;
3703
3704 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3705 return -EOPNOTSUPP;
3706
3707 if (!smu->ppt_funcs->get_pm_metrics)
3708 return -EOPNOTSUPP;
3709
3710 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3711 }
3712
3713 static int smu_enable_mgpu_fan_boost(void *handle)
3714 {
3715 struct smu_context *smu = handle;
3716 int ret = 0;
3717
3718 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3719 return -EOPNOTSUPP;
3720
3721 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3722 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3723
3724 return ret;
3725 }
3726
static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

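/**
 * smu_handle_passthrough_sbr - Enable or disable passthrough SBR handling
 * @smu: smu_context pointer
 * @enable: enable/disable SBR handling
 *
 * Forwards the request to the ASIC-specific implementation, if one is
 * provided. Returns 0 if the callback is absent or on success, otherwise
 * a negative error code from the implementation.
 */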
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

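/**
 * smu_get_ecc_info - Query UMC ECC information from the SMU
 * @smu: smu_context pointer
 * @umc_ecc: buffer to receive the ECC information
 *
 * Returns 0 on success, -EOPNOTSUPP if the ASIC does not implement the
 * query, or a negative error code on failure.
 */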
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
				 size_t *size)
{
	size_t offset = *size;
	int level;

	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
		if (level == policy->current_level)
			offset += sysfs_emit_at(sysbuf, offset,
						"%d : %s*\n", level,
						policy->desc->get_desc(policy, level));
		else
			offset += sysfs_emit_at(sysbuf, offset,
						"%d : %s\n", level,
						policy->desc->get_desc(policy, level));
	}

	*size = offset;
}

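/**
 * smu_get_pm_policy_info - Emit the levels of a PM policy into a sysfs buffer
 * @smu: smu_context pointer
 * @p_type: PM policy type to report
 * @sysbuf: sysfs buffer to emit into
 *
 * Writes one line per supported level of the requested policy, marking the
 * currently selected level with an asterisk. Returns the number of bytes
 * written, or a negative error code if the policy is unsupported or the
 * arguments are invalid.
 */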
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}

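/**
 * smu_get_pm_policy - Look up a PM policy by type
 * @smu: smu_context pointer
 * @p_type: PM policy type to look up
 *
 * Returns a pointer to the matching policy in the DPM policy context, or
 * NULL if no policy of that type is registered.
 */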
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int i;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!policy_ctxt)
		return NULL;

	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
		if (policy_ctxt->policies[i].policy_type == p_type)
			return &policy_ctxt->policies[i];
	}

	return NULL;
}

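/**
 * smu_set_pm_policy - Set the level of a PM policy
 * @smu: smu_context pointer
 * @p_type: PM policy type to modify
 * @level: level to select for the policy
 *
 * Returns 0 on success or if the requested level is already active,
 * -EINVAL for an out-of-range level, -EOPNOTSUPP if the policy is not
 * supported, or a negative error code from the implementation.
 */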
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy *dpm_policy = NULL;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int ret = -EOPNOTSUPP;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return ret;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);

	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
		return ret;

	if (dpm_policy->current_level == level)
		return 0;

	ret = dpm_policy->set_policy(smu, level);

	if (!ret)
		dpm_policy->current_level = level;

	return ret;
}

static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	enum smu_table_id table_id;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
		return -EOPNOTSUPP;

	table_id = smu_metrics_get_temp_table_id(type);

	if (table_id == SMU_TABLE_COUNT)
		return -EINVAL;

	/* If the request is to get size alone, return the cached table size */
	if (!table && tables[table_id].cache.size)
		return tables[table_id].cache.size;

	if (smu_table_cache_is_valid(&tables[table_id])) {
		memcpy(table, tables[table_id].cache.buffer,
		       tables[table_id].cache.size);
		return tables[table_id].cache.size;
	}

	return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
}

static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
{
	struct smu_context *smu = handle;
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
		ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);

	return ret;
}

static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	.pause_power_profile = smu_pause_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
	.get_xcp_metrics = smu_sys_get_xcp_metrics,
	.get_temp_metrics = smu_sys_get_temp_metrics,
	.temp_metrics_is_supported = smu_temp_metrics_is_supported,
};

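/**
 * smu_wait_for_event - Wait for an SMU event to complete
 * @smu: smu_context pointer
 * @event: event to wait for
 * @event_arg: event-specific argument
 *
 * Returns 0 on success, -EINVAL if the ASIC does not implement event
 * waiting, or a negative error code from the implementation.
 */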
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

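/**
 * smu_stb_collect_info - Copy the STB contents into a caller-provided buffer
 * @smu: smu_context pointer
 * @buf: destination buffer
 * @size: size of @buf; must match the STB buffer size
 *
 * Returns 0 on success, -EOPNOTSUPP if STB is not supported or not enabled,
 * or -EINVAL if @size does not match the expected STB buffer size.
 */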
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * There is no need to take the smu mutex here, as the STB is accessed
	 * directly through MMIO and does not go through the SMU messaging
	 * route (for now at least). Register accesses rely on the
	 * implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and release,
 * because .read returns at most PAGE_SIZE of data per call and may
 * therefore be invoked multiple times. The STB buffer is allocated in
 * .open and released in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

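/**
 * amdgpu_smu_stb_debug_fs_init - Create the debugfs entry for STB dumps
 * @adev: amdgpu_device pointer
 *
 * Registers the "amdgpu_smu_stb_dump" debugfs file when an STB buffer is
 * available; the dump can then typically be read through debugfs, e.g.
 * /sys/kernel/debug/dri/<minor>/amdgpu_smu_stb_dump. Does nothing if STB
 * is not supported or debugfs is disabled.
 */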
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

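/**
 * smu_send_hbm_bad_pages_num - Report the number of bad HBM pages to the SMU
 * @smu: smu_context pointer
 * @size: number of bad pages
 *
 * Returns 0 if the callback is absent or on success, otherwise a negative
 * error code from the implementation.
 */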
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

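/**
 * smu_send_hbm_bad_channel_flag - Report bad HBM channel information to the SMU
 * @smu: smu_context pointer
 * @size: bad channel data to send
 *
 * Returns 0 if the callback is absent or on success, otherwise a negative
 * error code from the implementation.
 */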
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

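/**
 * smu_send_rma_reason - Notify the SMU of the reason the device is flagged for RMA
 * @smu: smu_context pointer
 *
 * Returns 0 if the callback is absent or on success, otherwise a negative
 * error code from the implementation.
 */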
int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}

/**
 * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
 * @smu: smu_context pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns true if supported, false otherwise.
 */
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
	return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
}

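/**
 * smu_reset_sdma - Reset the SDMA instances selected by a mask
 * @smu: smu_context pointer
 * @inst_mask: bitmask of SDMA instances to reset
 *
 * Returns 0 if the callback is absent or on success, otherwise a negative
 * error code from the implementation.
 */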
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
		ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);

	return ret;
}

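/**
 * smu_reset_vcn_is_supported - Check if VCN reset is supported by SMU
 * @smu: smu_context pointer
 *
 * This function checks if the SMU supports resetting the VCN engine.
 * It returns true if supported, false otherwise.
 */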
bool smu_reset_vcn_is_supported(struct smu_context *smu)
{
	return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
}

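/**
 * smu_reset_vcn - Reset the VCN instances selected by a mask
 * @smu: smu_context pointer
 * @inst_mask: bitmask of VCN instances to reset
 *
 * The reset is forwarded to the ASIC-specific implementation, if one is
 * provided; its result is not propagated to the caller. Always returns 0.
 */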
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
	if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
		smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);

	return 0;
}
