1 /*
2 * Copyright 2019 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #define SWSMU_CODE_LAYER_L1
24
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_6_ppt.h"
46 #include "smu_v13_0_7_ppt.h"
47 #include "smu_v14_0_0_ppt.h"
48 #include "smu_v14_0_2_ppt.h"
49 #include "amd_pcie.h"
50
51 /*
52 * DO NOT use these for err/warn/info/debug messages.
53 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54 * They are more MGPU friendly.
55 */
56 #undef pr_err
57 #undef pr_warn
58 #undef pr_info
59 #undef pr_debug
60
61 static const struct amd_pm_funcs swsmu_pm_funcs;
62 static int smu_force_smuclk_levels(struct smu_context *smu,
63 enum smu_clk_type clk_type,
64 uint32_t mask);
65 static int smu_handle_task(struct smu_context *smu,
66 enum amd_dpm_forced_level level,
67 enum amd_pp_task task_id);
68 static int smu_reset(struct smu_context *smu);
69 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70 static int smu_set_fan_control_mode(void *handle, u32 value);
71 static int smu_set_power_limit(void *handle, uint32_t limit);
72 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75 static void smu_power_profile_mode_get(struct smu_context *smu,
76 enum PP_SMC_POWER_PROFILE profile_mode);
77 static void smu_power_profile_mode_put(struct smu_context *smu,
78 enum PP_SMC_POWER_PROFILE profile_mode);
79 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
80
81 static int smu_sys_get_pp_feature_mask(void *handle,
82 char *buf)
83 {
84 struct smu_context *smu = handle;
85
86 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
87 return -EOPNOTSUPP;
88
89 return smu_get_pp_feature_mask(smu, buf);
90 }
91
92 static int smu_sys_set_pp_feature_mask(void *handle,
93 uint64_t new_mask)
94 {
95 struct smu_context *smu = handle;
96
97 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
98 return -EOPNOTSUPP;
99
100 return smu_set_pp_feature_mask(smu, new_mask);
101 }
102
103 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
104 {
105 if (!smu->ppt_funcs->set_gfx_off_residency)
106 return -EINVAL;
107
108 return smu_set_gfx_off_residency(smu, value);
109 }
110
111 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
112 {
113 if (!smu->ppt_funcs->get_gfx_off_residency)
114 return -EINVAL;
115
116 return smu_get_gfx_off_residency(smu, value);
117 }
118
119 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
120 {
121 if (!smu->ppt_funcs->get_gfx_off_entrycount)
122 return -EINVAL;
123
124 return smu_get_gfx_off_entrycount(smu, value);
125 }
126
127 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
128 {
129 if (!smu->ppt_funcs->get_gfx_off_status)
130 return -EINVAL;
131
132 *value = smu_get_gfx_off_status(smu);
133
134 return 0;
135 }
136
137 int smu_set_soft_freq_range(struct smu_context *smu,
138 enum pp_clock_type type,
139 uint32_t min,
140 uint32_t max)
141 {
142 enum smu_clk_type clk_type;
143 int ret = 0;
144
145 clk_type = smu_convert_to_smuclk(type);
146 if (clk_type == SMU_CLK_COUNT)
147 return -EINVAL;
148
149 if (smu->ppt_funcs->set_soft_freq_limited_range)
150 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
151 clk_type,
152 min,
153 max,
154 false);
155
156 return ret;
157 }
158
159 int smu_get_dpm_freq_range(struct smu_context *smu,
160 enum smu_clk_type clk_type,
161 uint32_t *min,
162 uint32_t *max)
163 {
164 int ret = -ENOTSUPP;
165
166 if (!min && !max)
167 return -EINVAL;
168
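/*
 * Callers may pass NULL for whichever bound they do not need;
 * only a request with both pointers NULL is rejected above.
 */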
169 if (smu->ppt_funcs->get_dpm_ultimate_freq)
170 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
171 clk_type,
172 min,
173 max);
174
175 return ret;
176 }
177
178 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
179 {
180 int ret = 0;
181 struct amdgpu_device *adev = smu->adev;
182
183 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
184 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
185 if (ret)
186 dev_err(adev->dev, "Failed to enable gfx imu!\n");
187 }
188 return ret;
189 }
190
191 static u32 smu_get_mclk(void *handle, bool low)
192 {
193 struct smu_context *smu = handle;
194 uint32_t clk_freq;
195 int ret = 0;
196
197 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
198 low ? &clk_freq : NULL,
199 !low ? &clk_freq : NULL);
200 if (ret)
201 return 0;
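/*
 * clk_freq is reported by the SMU in MHz, while the legacy powerplay
 * interface expects 10 kHz units, hence the multiply by 100.
 */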
202 return clk_freq * 100;
203 }
204
205 static u32 smu_get_sclk(void *handle, bool low)
206 {
207 struct smu_context *smu = handle;
208 uint32_t clk_freq;
209 int ret = 0;
210
211 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
212 low ? &clk_freq : NULL,
213 !low ? &clk_freq : NULL);
214 if (ret)
215 return 0;
216 return clk_freq * 100;
217 }
218
219 static int smu_set_gfx_imu_enable(struct smu_context *smu)
220 {
221 struct amdgpu_device *adev = smu->adev;
222
223 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
224 return 0;
225
226 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
227 return 0;
228
229 return smu_set_gfx_power_up_by_imu(smu);
230 }
231
232 static bool is_vcn_enabled(struct amdgpu_device *adev)
233 {
234 int i;
235
236 for (i = 0; i < adev->num_ip_blocks; i++) {
237 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
238 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
239 !adev->ip_blocks[i].status.valid)
240 return false;
241 }
242
243 return true;
244 }
245
246 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
247 bool enable,
248 int inst)
249 {
250 struct smu_power_context *smu_power = &smu->smu_power;
251 struct smu_power_gate *power_gate = &smu_power->power_gate;
252 int ret = 0;
253
254 /*
255 * Don't power on VCN/JPEG when those IP blocks are skipped.
256 */
257 if (!is_vcn_enabled(smu->adev))
258 return 0;
259
260 if (!smu->ppt_funcs->dpm_set_vcn_enable)
261 return 0;
262
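/*
 * vcn_gated is 1 while the instance is gated. A non-zero XOR with the
 * requested 'enable' means the instance is already in the requested
 * state, so there is nothing to do.
 */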
263 if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
264 return 0;
265
266 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
267 if (!ret)
268 atomic_set(&power_gate->vcn_gated[inst], !enable);
269
270 return ret;
271 }
272
273 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
274 bool enable)
275 {
276 struct smu_power_context *smu_power = &smu->smu_power;
277 struct smu_power_gate *power_gate = &smu_power->power_gate;
278 int ret = 0;
279
280 if (!is_vcn_enabled(smu->adev))
281 return 0;
282
283 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
284 return 0;
285
286 if (atomic_read(&power_gate->jpeg_gated) ^ enable)
287 return 0;
288
289 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
290 if (!ret)
291 atomic_set(&power_gate->jpeg_gated, !enable);
292
293 return ret;
294 }
295
296 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
297 bool enable)
298 {
299 struct smu_power_context *smu_power = &smu->smu_power;
300 struct smu_power_gate *power_gate = &smu_power->power_gate;
301 int ret = 0;
302
303 if (!smu->ppt_funcs->dpm_set_vpe_enable)
304 return 0;
305
306 if (atomic_read(&power_gate->vpe_gated) ^ enable)
307 return 0;
308
309 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
310 if (!ret)
311 atomic_set(&power_gate->vpe_gated, !enable);
312
313 return ret;
314 }
315
316 static int smu_dpm_set_isp_enable(struct smu_context *smu,
317 bool enable)
318 {
319 struct smu_power_context *smu_power = &smu->smu_power;
320 struct smu_power_gate *power_gate = &smu_power->power_gate;
321 int ret;
322
323 if (!smu->ppt_funcs->dpm_set_isp_enable)
324 return 0;
325
326 if (atomic_read(&power_gate->isp_gated) ^ enable)
327 return 0;
328
329 ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
330 if (!ret)
331 atomic_set(&power_gate->isp_gated, !enable);
332
333 return ret;
334 }
335
336 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
337 bool enable)
338 {
339 struct smu_power_context *smu_power = &smu->smu_power;
340 struct smu_power_gate *power_gate = &smu_power->power_gate;
341 int ret = 0;
342
343 if (!smu->adev->enable_umsch_mm)
344 return 0;
345
346 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
347 return 0;
348
349 if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
350 return 0;
351
352 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
353 if (!ret)
354 atomic_set(&power_gate->umsch_mm_gated, !enable);
355
356 return ret;
357 }
358
359 static int smu_set_mall_enable(struct smu_context *smu)
360 {
361 int ret = 0;
362
363 if (!smu->ppt_funcs->set_mall_enable)
364 return 0;
365
366 ret = smu->ppt_funcs->set_mall_enable(smu);
367
368 return ret;
369 }
370
371 /**
372 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
373 *
374 * @handle: smu_context pointer
375 * @block_type: the IP block to power gate/ungate
376 * @gate: to power gate if true, ungate otherwise
377 * @inst: the instance of the IP block to power gate/ungate
378 *
379 * This API uses no smu->mutex lock protection because:
380 * 1. It is called by another IP block (gfx/sdma/vcn/uvd/vce), and the
381 * caller guarantees the call is free of race conditions.
382 * 2. Or it is called on a user request to change power_dpm_force_performance_level,
383 * in which case the smu->mutex protection is already enforced by the
384 * parent API smu_force_performance_level in the call path.
385 */
386 static int smu_dpm_set_power_gate(void *handle,
387 uint32_t block_type,
388 bool gate,
389 int inst)
390 {
391 struct smu_context *smu = handle;
392 int ret = 0;
393
394 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
395 dev_WARN(smu->adev->dev,
396 "SMU uninitialized but power %s requested for %u!\n",
397 gate ? "gate" : "ungate", block_type);
398 return -EOPNOTSUPP;
399 }
400
401 switch (block_type) {
402 /*
403 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
404 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
405 */
406 case AMD_IP_BLOCK_TYPE_UVD:
407 case AMD_IP_BLOCK_TYPE_VCN:
408 ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
409 if (ret)
410 dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
411 gate ? "gate" : "ungate", inst);
412 break;
413 case AMD_IP_BLOCK_TYPE_GFX:
414 ret = smu_gfx_off_control(smu, gate);
415 if (ret)
416 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
417 gate ? "enable" : "disable");
418 break;
419 case AMD_IP_BLOCK_TYPE_SDMA:
420 ret = smu_powergate_sdma(smu, gate);
421 if (ret)
422 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
423 gate ? "gate" : "ungate");
424 break;
425 case AMD_IP_BLOCK_TYPE_JPEG:
426 ret = smu_dpm_set_jpeg_enable(smu, !gate);
427 if (ret)
428 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
429 gate ? "gate" : "ungate");
430 break;
431 case AMD_IP_BLOCK_TYPE_VPE:
432 ret = smu_dpm_set_vpe_enable(smu, !gate);
433 if (ret)
434 dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
435 gate ? "gate" : "ungate");
436 break;
437 case AMD_IP_BLOCK_TYPE_ISP:
438 ret = smu_dpm_set_isp_enable(smu, !gate);
439 if (ret)
440 dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
441 gate ? "gate" : "ungate");
442 break;
443 default:
444 dev_err(smu->adev->dev, "Unsupported block type!\n");
445 return -EINVAL;
446 }
447
448 return ret;
449 }
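/*
 * Rough usage sketch (illustrative only, not taken from this file):
 * callers normally reach this through the amd_pm_funcs dispatch table
 * (swsmu_pm_funcs) rather than invoking the static function directly.
 *
 *   struct smu_context *smu = adev->powerplay.pp_handle;
 *   // ungate VCN instance 0 before submitting decode work
 *   smu_dpm_set_power_gate(smu, AMD_IP_BLOCK_TYPE_VCN, false, 0);
 */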
450
451 /**
452 * smu_set_user_clk_dependencies - set user profile clock dependencies
453 *
454 * @smu: smu_context pointer
455 * @clk: enum smu_clk_type type
456 *
457 * Enable/Disable the clock dependency for the @clk type.
458 */
459 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
460 {
461 if (smu->adev->in_suspend)
462 return;
463
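/*
 * The dependency mask records which clocks are slaved to the one the
 * user just forced; dependent clocks are skipped when the user profile
 * is restored. MCLK takes precedence: once it owns the dependency,
 * later FCLK/SOCCLK requests are ignored.
 */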
464 if (clk == SMU_MCLK) {
465 smu->user_dpm_profile.clk_dependency = 0;
466 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
467 } else if (clk == SMU_FCLK) {
468 /* MCLK takes precedence over FCLK */
469 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
470 return;
471
472 smu->user_dpm_profile.clk_dependency = 0;
473 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
474 } else if (clk == SMU_SOCCLK) {
475 /* MCLK takes precedence over SOCCLK */
476 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
477 return;
478
479 smu->user_dpm_profile.clk_dependency = 0;
480 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
481 } else
482 /* Add clk dependencies here, if any */
483 return;
484 }
485
486 /**
487 * smu_restore_dpm_user_profile - reinstate user dpm profile
488 *
489 * @smu: smu_context pointer
490 *
491 * Restore the saved user power configurations, including power limit,
492 * clock frequencies, fan control mode and fan speed.
493 */
494 static void smu_restore_dpm_user_profile(struct smu_context *smu)
495 {
496 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
497 int ret = 0;
498
499 if (!smu->adev->in_suspend)
500 return;
501
502 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
503 return;
504
505 /* Enable restore flag */
506 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
507
508 /* set the user dpm power limit */
509 if (smu->user_dpm_profile.power_limit) {
510 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
511 if (ret)
512 dev_err(smu->adev->dev, "Failed to set power limit value\n");
513 }
514
515 /* set the user dpm clock configurations */
516 if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
517 enum smu_clk_type clk_type;
518
519 for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
520 /*
521 * Iterate over smu clk type and force the saved user clk
522 * configs, skip if clock dependency is enabled
523 */
524 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
525 smu->user_dpm_profile.clk_mask[clk_type]) {
526 ret = smu_force_smuclk_levels(smu, clk_type,
527 smu->user_dpm_profile.clk_mask[clk_type]);
528 if (ret)
529 dev_err(smu->adev->dev,
530 "Failed to set clock type = %d\n", clk_type);
531 }
532 }
533 }
534
535 /* set the user dpm fan configurations */
536 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
537 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
538 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
539 if (ret != -EOPNOTSUPP) {
540 smu->user_dpm_profile.fan_speed_pwm = 0;
541 smu->user_dpm_profile.fan_speed_rpm = 0;
542 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
543 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
544 }
545
546 if (smu->user_dpm_profile.fan_speed_pwm) {
547 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
548 if (ret != -EOPNOTSUPP)
549 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
550 }
551
552 if (smu->user_dpm_profile.fan_speed_rpm) {
553 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
554 if (ret != -EOPNOTSUPP)
555 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
556 }
557 }
558
559 /* Restore user customized OD settings */
560 if (smu->user_dpm_profile.user_od) {
561 if (smu->ppt_funcs->restore_user_od_settings) {
562 ret = smu->ppt_funcs->restore_user_od_settings(smu);
563 if (ret)
564 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
565 }
566 }
567
568 /* Disable restore flag */
569 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
570 }
571
572 static int smu_get_power_num_states(void *handle,
573 struct pp_states_info *state_info)
574 {
575 if (!state_info)
576 return -EINVAL;
577
578 /* power states are not supported */
579 memset(state_info, 0, sizeof(struct pp_states_info));
580 state_info->nums = 1;
581 state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
582
583 return 0;
584 }
585
586 bool is_support_sw_smu(struct amdgpu_device *adev)
587 {
588 /* vega20 is 11.0.2, but it's supported via the powerplay code */
589 if (adev->asic_type == CHIP_VEGA20)
590 return false;
591
592 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
593 amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
594 return true;
595
596 return false;
597 }
598
599 bool is_support_cclk_dpm(struct amdgpu_device *adev)
600 {
601 struct smu_context *smu = adev->powerplay.pp_handle;
602
603 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
604 return false;
605
606 return true;
607 }
608
609
610 static int smu_sys_get_pp_table(void *handle,
611 char **table)
612 {
613 struct smu_context *smu = handle;
614 struct smu_table_context *smu_table = &smu->smu_table;
615
616 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
617 return -EOPNOTSUPP;
618
619 if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
620 return -EINVAL;
621
622 if (smu_table->hardcode_pptable)
623 *table = smu_table->hardcode_pptable;
624 else
625 *table = smu_table->power_play_table;
626
627 return smu_table->power_play_table_size;
628 }
629
630 static int smu_sys_set_pp_table(void *handle,
631 const char *buf,
632 size_t size)
633 {
634 struct smu_context *smu = handle;
635 struct smu_table_context *smu_table = &smu->smu_table;
636 ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
637 int ret = 0;
638
639 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
640 return -EOPNOTSUPP;
641
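/*
 * The uploaded buffer is expected to start with an ATOM common table
 * header whose usStructureSize matches the upload size; anything else
 * is rejected below.
 */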
642 if (header->usStructureSize != size) {
643 dev_err(smu->adev->dev, "pp table size not matched !\n");
644 return -EIO;
645 }
646
647 if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
648 kfree(smu_table->hardcode_pptable);
649 smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
650 if (!smu_table->hardcode_pptable)
651 return -ENOMEM;
652 }
653
654 memcpy(smu_table->hardcode_pptable, buf, size);
655 smu_table->power_play_table = smu_table->hardcode_pptable;
656 smu_table->power_play_table_size = size;
657
658 /*
659 * A special hw_fini action (for Navi1x, the DPM disablement will be
660 * skipped) may be needed for custom pptable uploading.
661 */
662 smu->uploading_custom_pp_table = true;
663
664 ret = smu_reset(smu);
665 if (ret)
666 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
667
668 smu->uploading_custom_pp_table = false;
669
670 return ret;
671 }
672
673 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
674 {
675 struct smu_feature *feature = &smu->smu_feature;
676 uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
677 int ret = 0;
678
679 /*
680 * With SCPM enabled, setting the allowed feature masks (via
681 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
682 * That means there is no way to let PMFW know about the settings below.
683 * Thus, we just assume all the features are allowed in
684 * that scenario.
685 */
686 if (smu->adev->scpm_enabled) {
687 bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
688 return 0;
689 }
690
691 bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
692
693 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
694 SMU_FEATURE_MAX/32);
695 if (ret)
696 return ret;
697
698 bitmap_or(feature->allowed, feature->allowed,
699 (unsigned long *)allowed_feature_mask,
700 feature->feature_num);
701
702 return ret;
703 }
704
705 static int smu_set_funcs(struct amdgpu_device *adev)
706 {
707 struct smu_context *smu = adev->powerplay.pp_handle;
708
709 if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
710 smu->od_enabled = true;
711
712 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
713 case IP_VERSION(11, 0, 0):
714 case IP_VERSION(11, 0, 5):
715 case IP_VERSION(11, 0, 9):
716 navi10_set_ppt_funcs(smu);
717 break;
718 case IP_VERSION(11, 0, 7):
719 case IP_VERSION(11, 0, 11):
720 case IP_VERSION(11, 0, 12):
721 case IP_VERSION(11, 0, 13):
722 sienna_cichlid_set_ppt_funcs(smu);
723 break;
724 case IP_VERSION(12, 0, 0):
725 case IP_VERSION(12, 0, 1):
726 renoir_set_ppt_funcs(smu);
727 break;
728 case IP_VERSION(11, 5, 0):
729 case IP_VERSION(11, 5, 2):
730 vangogh_set_ppt_funcs(smu);
731 break;
732 case IP_VERSION(13, 0, 1):
733 case IP_VERSION(13, 0, 3):
734 case IP_VERSION(13, 0, 8):
735 yellow_carp_set_ppt_funcs(smu);
736 break;
737 case IP_VERSION(13, 0, 4):
738 case IP_VERSION(13, 0, 11):
739 smu_v13_0_4_set_ppt_funcs(smu);
740 break;
741 case IP_VERSION(13, 0, 5):
742 smu_v13_0_5_set_ppt_funcs(smu);
743 break;
744 case IP_VERSION(11, 0, 8):
745 cyan_skillfish_set_ppt_funcs(smu);
746 break;
747 case IP_VERSION(11, 0, 2):
748 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
749 arcturus_set_ppt_funcs(smu);
750 /* OD is not supported on Arcturus */
751 smu->od_enabled = false;
752 break;
753 case IP_VERSION(13, 0, 2):
754 aldebaran_set_ppt_funcs(smu);
755 /* Enable pp_od_clk_voltage node */
756 smu->od_enabled = true;
757 break;
758 case IP_VERSION(13, 0, 0):
759 case IP_VERSION(13, 0, 10):
760 smu_v13_0_0_set_ppt_funcs(smu);
761 break;
762 case IP_VERSION(13, 0, 6):
763 case IP_VERSION(13, 0, 14):
764 case IP_VERSION(13, 0, 12):
765 smu_v13_0_6_set_ppt_funcs(smu);
766 /* Enable pp_od_clk_voltage node */
767 smu->od_enabled = true;
768 break;
769 case IP_VERSION(13, 0, 7):
770 smu_v13_0_7_set_ppt_funcs(smu);
771 break;
772 case IP_VERSION(14, 0, 0):
773 case IP_VERSION(14, 0, 1):
774 case IP_VERSION(14, 0, 4):
775 case IP_VERSION(14, 0, 5):
776 smu_v14_0_0_set_ppt_funcs(smu);
777 break;
778 case IP_VERSION(14, 0, 2):
779 case IP_VERSION(14, 0, 3):
780 smu_v14_0_2_set_ppt_funcs(smu);
781 break;
782 default:
783 return -EINVAL;
784 }
785
786 return 0;
787 }
788
789 static int smu_early_init(struct amdgpu_ip_block *ip_block)
790 {
791 struct amdgpu_device *adev = ip_block->adev;
792 struct smu_context *smu;
793 int r;
794
795 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
796 if (!smu)
797 return -ENOMEM;
798
799 smu->adev = adev;
800 smu->pm_enabled = !!amdgpu_dpm;
801 smu->is_apu = false;
802 smu->smu_baco.state = SMU_BACO_STATE_NONE;
803 smu->smu_baco.platform_support = false;
804 smu->smu_baco.maco_support = false;
805 smu->user_dpm_profile.fan_mode = -1;
806 smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
807
808 mutex_init(&smu->message_lock);
809
810 adev->powerplay.pp_handle = smu;
811 adev->powerplay.pp_funcs = &swsmu_pm_funcs;
812
813 r = smu_set_funcs(adev);
814 if (r)
815 return r;
816 return smu_init_microcode(smu);
817 }
818
819 static int smu_set_default_dpm_table(struct smu_context *smu)
820 {
821 struct amdgpu_device *adev = smu->adev;
822 struct smu_power_context *smu_power = &smu->smu_power;
823 struct smu_power_gate *power_gate = &smu_power->power_gate;
824 int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
825 int ret = 0;
826
827 if (!smu->ppt_funcs->set_default_dpm_table)
828 return 0;
829
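/*
 * VCN/JPEG need to be powered up while the default DPM tables are
 * built; save the current gating state here so it can be restored
 * once the tables are set up.
 */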
830 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
831 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
832 vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
833 }
834 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
835 jpeg_gate = atomic_read(&power_gate->jpeg_gated);
836
837 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
838 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
839 ret = smu_dpm_set_vcn_enable(smu, true, i);
840 if (ret)
841 return ret;
842 }
843 }
844
845 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
846 ret = smu_dpm_set_jpeg_enable(smu, true);
847 if (ret)
848 goto err_out;
849 }
850
851 ret = smu->ppt_funcs->set_default_dpm_table(smu);
852 if (ret)
853 dev_err(smu->adev->dev,
854 "Failed to setup default dpm clock tables!\n");
855
856 if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
857 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
858 err_out:
859 if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
860 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
861 smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
862 }
863
864 return ret;
865 }
866
867 static int smu_apply_default_config_table_settings(struct smu_context *smu)
868 {
869 struct amdgpu_device *adev = smu->adev;
870 int ret = 0;
871
872 ret = smu_get_default_config_table_settings(smu,
873 &adev->pm.config_table);
874 if (ret)
875 return ret;
876
877 return smu_set_config_table(smu, &adev->pm.config_table);
878 }
879
880 static int smu_late_init(struct amdgpu_ip_block *ip_block)
881 {
882 struct amdgpu_device *adev = ip_block->adev;
883 struct smu_context *smu = adev->powerplay.pp_handle;
884 int ret = 0;
885
886 smu_set_fine_grain_gfx_freq_parameters(smu);
887
888 if (!smu->pm_enabled)
889 return 0;
890
891 ret = smu_post_init(smu);
892 if (ret) {
893 dev_err(adev->dev, "Failed to post smu init!\n");
894 return ret;
895 }
896
897 /*
898 * Explicitly notify PMFW of the power mode the system is in, since
899 * PMFW may have booted the ASIC with a different mode.
900 * For ASICs supporting AC/DC switch via gpio, PMFW will
901 * handle the switch automatically and driver involvement
902 * is unnecessary.
903 */
904 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
905 smu_set_ac_dc(smu);
906
907 if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
908 (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
909 return 0;
910
911 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
912 ret = smu_set_default_od_settings(smu);
913 if (ret) {
914 dev_err(adev->dev, "Failed to setup default OD settings!\n");
915 return ret;
916 }
917 }
918
919 ret = smu_populate_umd_state_clk(smu);
920 if (ret) {
921 dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
922 return ret;
923 }
924
925 ret = smu_get_asic_power_limits(smu,
926 &smu->current_power_limit,
927 &smu->default_power_limit,
928 &smu->max_power_limit,
929 &smu->min_power_limit);
930 if (ret) {
931 dev_err(adev->dev, "Failed to get asic power limits!\n");
932 return ret;
933 }
934
935 if (!amdgpu_sriov_vf(adev))
936 smu_get_unique_id(smu);
937
938 smu_get_fan_parameters(smu);
939
940 smu_handle_task(smu,
941 smu->smu_dpm.dpm_level,
942 AMD_PP_TASK_COMPLETE_INIT);
943
944 ret = smu_apply_default_config_table_settings(smu);
945 if (ret && (ret != -EOPNOTSUPP)) {
946 dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
947 return ret;
948 }
949
950 smu_restore_dpm_user_profile(smu);
951
952 return 0;
953 }
954
955 static int smu_init_fb_allocations(struct smu_context *smu)
956 {
957 struct amdgpu_device *adev = smu->adev;
958 struct smu_table_context *smu_table = &smu->smu_table;
959 struct smu_table *tables = smu_table->tables;
960 struct smu_table *driver_table = &(smu_table->driver_table);
961 uint32_t max_table_size = 0;
962 int ret, i;
963
964 /* VRAM allocation for tool table */
965 if (tables[SMU_TABLE_PMSTATUSLOG].size) {
966 ret = amdgpu_bo_create_kernel(adev,
967 tables[SMU_TABLE_PMSTATUSLOG].size,
968 tables[SMU_TABLE_PMSTATUSLOG].align,
969 tables[SMU_TABLE_PMSTATUSLOG].domain,
970 &tables[SMU_TABLE_PMSTATUSLOG].bo,
971 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
972 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
973 if (ret) {
974 dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
975 return ret;
976 }
977 }
978
979 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
980 /* VRAM allocation for driver table */
981 for (i = 0; i < SMU_TABLE_COUNT; i++) {
982 if (tables[i].size == 0)
983 continue;
984
985 /* If one of the tables has VRAM domain restriction, keep it in
986 * VRAM
987 */
988 if ((tables[i].domain &
989 (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
990 AMDGPU_GEM_DOMAIN_VRAM)
991 driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
992
993 if (i == SMU_TABLE_PMSTATUSLOG)
994 continue;
995
996 if (max_table_size < tables[i].size)
997 max_table_size = tables[i].size;
998 }
999
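/*
 * The single driver table is sized to the largest SMC table (excluding
 * the tool/PMSTATUSLOG table, which gets its own BO above) so it can be
 * reused as the bounce buffer for every table transfer.
 */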
1000 driver_table->size = max_table_size;
1001 driver_table->align = PAGE_SIZE;
1002
1003 ret = amdgpu_bo_create_kernel(adev,
1004 driver_table->size,
1005 driver_table->align,
1006 driver_table->domain,
1007 &driver_table->bo,
1008 &driver_table->mc_address,
1009 &driver_table->cpu_addr);
1010 if (ret) {
1011 dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
1012 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1013 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1014 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1015 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1016 }
1017
1018 return ret;
1019 }
1020
1021 static int smu_fini_fb_allocations(struct smu_context *smu)
1022 {
1023 struct smu_table_context *smu_table = &smu->smu_table;
1024 struct smu_table *tables = smu_table->tables;
1025 struct smu_table *driver_table = &(smu_table->driver_table);
1026
1027 if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1028 amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1029 &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1030 &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1031
1032 amdgpu_bo_free_kernel(&driver_table->bo,
1033 &driver_table->mc_address,
1034 &driver_table->cpu_addr);
1035
1036 return 0;
1037 }
1038
1039 static void smu_update_gpu_addresses(struct smu_context *smu)
1040 {
1041 struct smu_table_context *smu_table = &smu->smu_table;
1042 struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
1043 struct smu_table *driver_table = &(smu_table->driver_table);
1044 struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
1045
1046 if (pm_status_table->bo)
1047 pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
1048 if (driver_table->bo)
1049 driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
1050 if (dummy_read_1_table->bo)
1051 dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
1052 }
1053
1054 /**
1055 * smu_alloc_memory_pool - allocate memory pool in the system memory
1056 *
1057 * @smu: smu_context pointer
1058 *
1059 * This memory pool will be used by the SMC; the SetSystemVirtualDramAddr and
1060 * DramLogSetDramAddr messages notify the SMC of its location.
1061 *
1062 * Returns 0 on success, error on failure.
1063 */
1064 static int smu_alloc_memory_pool(struct smu_context *smu)
1065 {
1066 struct amdgpu_device *adev = smu->adev;
1067 struct smu_table_context *smu_table = &smu->smu_table;
1068 struct smu_table *memory_pool = &smu_table->memory_pool;
1069 uint64_t pool_size = smu->pool_size;
1070 int ret = 0;
1071
1072 if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1073 return ret;
1074
1075 memory_pool->size = pool_size;
1076 memory_pool->align = PAGE_SIZE;
1077 memory_pool->domain =
1078 (adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
1079 AMDGPU_GEM_DOMAIN_VRAM :
1080 AMDGPU_GEM_DOMAIN_GTT;
1081
1082 switch (pool_size) {
1083 case SMU_MEMORY_POOL_SIZE_256_MB:
1084 case SMU_MEMORY_POOL_SIZE_512_MB:
1085 case SMU_MEMORY_POOL_SIZE_1_GB:
1086 case SMU_MEMORY_POOL_SIZE_2_GB:
1087 ret = amdgpu_bo_create_kernel(adev,
1088 memory_pool->size,
1089 memory_pool->align,
1090 memory_pool->domain,
1091 &memory_pool->bo,
1092 &memory_pool->mc_address,
1093 &memory_pool->cpu_addr);
1094 if (ret)
1095 dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
1096 break;
1097 default:
1098 break;
1099 }
1100
1101 return ret;
1102 }
1103
1104 static int smu_free_memory_pool(struct smu_context *smu)
1105 {
1106 struct smu_table_context *smu_table = &smu->smu_table;
1107 struct smu_table *memory_pool = &smu_table->memory_pool;
1108
1109 if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1110 return 0;
1111
1112 amdgpu_bo_free_kernel(&memory_pool->bo,
1113 &memory_pool->mc_address,
1114 &memory_pool->cpu_addr);
1115
1116 memset(memory_pool, 0, sizeof(struct smu_table));
1117
1118 return 0;
1119 }
1120
1121 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1122 {
1123 struct smu_table_context *smu_table = &smu->smu_table;
1124 struct smu_table *dummy_read_1_table =
1125 &smu_table->dummy_read_1_table;
1126 struct amdgpu_device *adev = smu->adev;
1127 int ret = 0;
1128
1129 if (!dummy_read_1_table->size)
1130 return 0;
1131
1132 ret = amdgpu_bo_create_kernel(adev,
1133 dummy_read_1_table->size,
1134 dummy_read_1_table->align,
1135 dummy_read_1_table->domain,
1136 &dummy_read_1_table->bo,
1137 &dummy_read_1_table->mc_address,
1138 &dummy_read_1_table->cpu_addr);
1139 if (ret)
1140 dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1141
1142 return ret;
1143 }
1144
1145 static void smu_free_dummy_read_table(struct smu_context *smu)
1146 {
1147 struct smu_table_context *smu_table = &smu->smu_table;
1148 struct smu_table *dummy_read_1_table =
1149 &smu_table->dummy_read_1_table;
1150
1151
1152 amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1153 &dummy_read_1_table->mc_address,
1154 &dummy_read_1_table->cpu_addr);
1155
1156 memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1157 }
1158
1159 static int smu_smc_table_sw_init(struct smu_context *smu)
1160 {
1161 int ret;
1162
1163 /**
1164 * Create the smu_table structure, and init smc tables such as
1165 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1166 */
1167 ret = smu_init_smc_tables(smu);
1168 if (ret) {
1169 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1170 return ret;
1171 }
1172
1173 /**
1174 * Create the smu_power_context structure, and allocate the smu_dpm_context
1175 * and context size needed to fill the smu_power_context data.
1176 */
1177 ret = smu_init_power(smu);
1178 if (ret) {
1179 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
1180 return ret;
1181 }
1182
1183 /*
1184 * allocate vram bos to store smc table contents.
1185 */
1186 ret = smu_init_fb_allocations(smu);
1187 if (ret)
1188 return ret;
1189
1190 ret = smu_alloc_memory_pool(smu);
1191 if (ret)
1192 return ret;
1193
1194 ret = smu_alloc_dummy_read_table(smu);
1195 if (ret)
1196 return ret;
1197
1198 ret = smu_i2c_init(smu);
1199 if (ret)
1200 return ret;
1201
1202 return 0;
1203 }
1204
1205 static int smu_smc_table_sw_fini(struct smu_context *smu)
1206 {
1207 int ret;
1208
1209 smu_i2c_fini(smu);
1210
1211 smu_free_dummy_read_table(smu);
1212
1213 ret = smu_free_memory_pool(smu);
1214 if (ret)
1215 return ret;
1216
1217 ret = smu_fini_fb_allocations(smu);
1218 if (ret)
1219 return ret;
1220
1221 ret = smu_fini_power(smu);
1222 if (ret) {
1223 dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
1224 return ret;
1225 }
1226
1227 ret = smu_fini_smc_tables(smu);
1228 if (ret) {
1229 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
1230 return ret;
1231 }
1232
1233 return 0;
1234 }
1235
1236 static void smu_throttling_logging_work_fn(struct work_struct *work)
1237 {
1238 struct smu_context *smu = container_of(work, struct smu_context,
1239 throttling_logging_work);
1240
1241 smu_log_thermal_throttling(smu);
1242 }
1243
1244 static void smu_interrupt_work_fn(struct work_struct *work)
1245 {
1246 struct smu_context *smu = container_of(work, struct smu_context,
1247 interrupt_work);
1248
1249 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1250 smu->ppt_funcs->interrupt_work(smu);
1251 }
1252
1253 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1254 {
1255 struct smu_context *smu =
1256 container_of(work, struct smu_context, swctf_delayed_work.work);
1257 struct smu_temperature_range *range =
1258 &smu->thermal_range;
1259 struct amdgpu_device *adev = smu->adev;
1260 uint32_t hotspot_tmp, size;
1261
1262 /*
1263 * If the hotspot temperature is confirmed to be below the SW CTF setting point
1264 * after the enforced delay, nothing will be done.
1265 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1266 */
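/*
 * read_sensor reports the hotspot temperature in millidegrees Celsius,
 * hence the divide by 1000 before comparing against the shutdown limit.
 */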
1267 if (range->software_shutdown_temp &&
1268 smu->ppt_funcs->read_sensor &&
1269 !smu->ppt_funcs->read_sensor(smu,
1270 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1271 &hotspot_tmp,
1272 &size) &&
1273 hotspot_tmp / 1000 < range->software_shutdown_temp)
1274 return;
1275
1276 dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1277 dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1278 orderly_poweroff(true);
1279 }
1280
1281 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1282 {
1283 struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1284 struct smu_dpm_policy_ctxt *policy_ctxt;
1285 struct smu_dpm_policy *policy;
1286
1287 policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1288 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1289 if (policy)
1290 policy->current_level = XGMI_PLPD_DEFAULT;
1291 return;
1292 }
1293
1294 /* PMFW puts PLPD into the default policy after enabling the feature */
1295 if (smu_feature_is_enabled(smu,
1296 SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1297 if (policy)
1298 policy->current_level = XGMI_PLPD_DEFAULT;
1299 } else {
1300 policy_ctxt = dpm_ctxt->dpm_policies;
1301 if (policy_ctxt)
1302 policy_ctxt->policy_mask &=
1303 ~BIT(PP_PM_POLICY_XGMI_PLPD);
1304 }
1305 }
1306
1307 static void smu_init_power_profile(struct smu_context *smu)
1308 {
1309 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
1310 smu->power_profile_mode =
1311 PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1312 smu_power_profile_mode_get(smu, smu->power_profile_mode);
1313 }
1314
1315 static int smu_sw_init(struct amdgpu_ip_block *ip_block)
1316 {
1317 struct amdgpu_device *adev = ip_block->adev;
1318 struct smu_context *smu = adev->powerplay.pp_handle;
1319 int i, ret;
1320
1321 smu->pool_size = adev->pm.smu_prv_buffer_size;
1322 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1323 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1324 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1325
1326 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1327 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1328 atomic64_set(&smu->throttle_int_counter, 0);
1329 smu->watermarks_bitmap = 0;
1330
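/*
 * All multimedia power-gate state starts out as gated (1); the blocks
 * are ungated on demand via the smu_dpm_set_*_enable() helpers.
 */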
1331 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1332 atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
1333 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1334 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1335 atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
1336 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1337
1338 smu_init_power_profile(smu);
1339 smu->display_config = &adev->pm.pm_display_cfg;
1340
1341 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1342 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1343
1344 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1345 smu_swctf_delayed_work_handler);
1346
1347 ret = smu_smc_table_sw_init(smu);
1348 if (ret) {
1349 dev_err(adev->dev, "Failed to sw init smc table!\n");
1350 return ret;
1351 }
1352
1353 /* get boot_values from vbios to set revision, gfxclk, etc. */
1354 ret = smu_get_vbios_bootup_values(smu);
1355 if (ret) {
1356 dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1357 return ret;
1358 }
1359
1360 ret = smu_init_pptable_microcode(smu);
1361 if (ret) {
1362 dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1363 return ret;
1364 }
1365
1366 ret = smu_register_irq_handler(smu);
1367 if (ret) {
1368 dev_err(adev->dev, "Failed to register smc irq handler!\n");
1369 return ret;
1370 }
1371
1372 /* If there is no way to query fan control mode, fan control is not supported */
1373 if (!smu->ppt_funcs->get_fan_control_mode)
1374 smu->adev->pm.no_fan = true;
1375
1376 return 0;
1377 }
1378
1379 static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
1380 {
1381 struct amdgpu_device *adev = ip_block->adev;
1382 struct smu_context *smu = adev->powerplay.pp_handle;
1383 int ret;
1384
1385 ret = smu_smc_table_sw_fini(smu);
1386 if (ret) {
1387 dev_err(adev->dev, "Failed to sw fini smc table!\n");
1388 return ret;
1389 }
1390
1391 if (smu->custom_profile_params) {
1392 kfree(smu->custom_profile_params);
1393 smu->custom_profile_params = NULL;
1394 }
1395
1396 smu_fini_microcode(smu);
1397
1398 return 0;
1399 }
1400
1401 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1402 {
1403 struct amdgpu_device *adev = smu->adev;
1404 struct smu_temperature_range *range =
1405 &smu->thermal_range;
1406 int ret = 0;
1407
1408 if (!smu->ppt_funcs->get_thermal_temperature_range)
1409 return 0;
1410
1411 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1412 if (ret)
1413 return ret;
1414
1415 adev->pm.dpm.thermal.min_temp = range->min;
1416 adev->pm.dpm.thermal.max_temp = range->max;
1417 adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1418 adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1419 adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1420 adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1421 adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1422 adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1423 adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1424
1425 return ret;
1426 }
1427
1428 /**
1429 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1430 *
1431 * @smu: smu_context pointer
1432 *
1433 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1434 * Returns 0 on success, error on failure.
1435 */
1436 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1437 {
1438 struct wbrf_ranges_in_out wbrf_exclusion = {0};
1439 struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1440 struct amdgpu_device *adev = smu->adev;
1441 uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1442 uint64_t start, end;
1443 int ret, i, j;
1444
1445 ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1446 if (ret) {
1447 dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1448 return ret;
1449 }
1450
1451 /*
1452 * The exclusion ranges array we got might be filled with holes and duplicate
1453 * entries. For example:
1454 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1455 * We need to do some sorting and compaction to eliminate those holes and duplicate entries.
1456 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1457 */
1458 for (i = 0; i < num_of_wbrf_ranges; i++) {
1459 start = wifi_bands[i].start;
1460 end = wifi_bands[i].end;
1461
1462 /* get the last valid entry to fill the intermediate hole */
1463 if (!start && !end) {
1464 for (j = num_of_wbrf_ranges - 1; j > i; j--)
1465 if (wifi_bands[j].start && wifi_bands[j].end)
1466 break;
1467
1468 /* no valid entry left */
1469 if (j <= i)
1470 break;
1471
1472 start = wifi_bands[i].start = wifi_bands[j].start;
1473 end = wifi_bands[i].end = wifi_bands[j].end;
1474 wifi_bands[j].start = 0;
1475 wifi_bands[j].end = 0;
1476 num_of_wbrf_ranges = j;
1477 }
1478
1479 /* eliminate duplicate entries */
1480 for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1481 if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1482 wifi_bands[j].start = 0;
1483 wifi_bands[j].end = 0;
1484 }
1485 }
1486 }
1487
1488 /* Send the sorted wifi_bands to PMFW */
1489 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1490 /* Try to set the wifi_bands again */
1491 if (unlikely(ret == -EBUSY)) {
1492 mdelay(5);
1493 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1494 }
1495
1496 return ret;
1497 }
1498
1499 /**
1500 * smu_wbrf_event_handler - handle notify events
1501 *
1502 * @nb: notifier block
1503 * @action: event type
1504 * @_arg: event data
1505 *
1506 * Calls relevant amdgpu function in response to wbrf event
1507 * notification from kernel.
1508 */
1509 static int smu_wbrf_event_handler(struct notifier_block *nb,
1510 unsigned long action, void *_arg)
1511 {
1512 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1513
1514 switch (action) {
1515 case WBRF_CHANGED:
1516 schedule_delayed_work(&smu->wbrf_delayed_work,
1517 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1518 break;
1519 default:
1520 return NOTIFY_DONE;
1521 }
1522
1523 return NOTIFY_OK;
1524 }
1525
1526 /**
1527 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1528 *
1529 * @work: struct work_struct pointer
1530 *
1531 * Flood is over and driver will consume the latest exclusion ranges.
1532 */
1533 static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1534 {
1535 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1536
1537 smu_wbrf_handle_exclusion_ranges(smu);
1538 }
1539
1540 /**
1541 * smu_wbrf_support_check - check wbrf support
1542 *
1543 * @smu: smu_context pointer
1544 *
1545 * Verifies the ACPI interface whether wbrf is supported.
1546 */
1547 static void smu_wbrf_support_check(struct smu_context *smu)
1548 {
1549 struct amdgpu_device *adev = smu->adev;
1550
1551 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1552 acpi_amd_wbrf_supported_consumer(adev->dev);
1553
1554 if (smu->wbrf_supported)
1555 dev_info(adev->dev, "RF interference mitigation is supported\n");
1556 }
1557
1558 /**
1559 * smu_wbrf_init - init driver wbrf support
1560 *
1561 * @smu: smu_context pointer
1562 *
1563 * Verifies the AMD ACPI interfaces and registers with the wbrf
1564 * notifier chain if wbrf feature is supported.
1565 * Returns 0 on success, error on failure.
1566 */
1567 static int smu_wbrf_init(struct smu_context *smu)
1568 {
1569 int ret;
1570
1571 if (!smu->wbrf_supported)
1572 return 0;
1573
1574 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1575
1576 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1577 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1578 if (ret)
1579 return ret;
1580
1581 /*
1582 * Some wifi band exclusion ranges may already be in place
1583 * before our driver loads. Schedule the work below to make
1584 * sure our driver is aware of those exclusion ranges.
1585 */
1586 schedule_delayed_work(&smu->wbrf_delayed_work,
1587 msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1588
1589 return 0;
1590 }
1591
1592 /**
1593 * smu_wbrf_fini - tear down driver wbrf support
1594 *
1595 * @smu: smu_context pointer
1596 *
1597 * Unregisters with the wbrf notifier chain.
1598 */
1599 static void smu_wbrf_fini(struct smu_context *smu)
1600 {
1601 if (!smu->wbrf_supported)
1602 return;
1603
1604 amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1605
1606 cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1607 }
1608
1609 static int smu_smc_hw_setup(struct smu_context *smu)
1610 {
1611 struct smu_feature *feature = &smu->smu_feature;
1612 struct amdgpu_device *adev = smu->adev;
1613 uint8_t pcie_gen = 0, pcie_width = 0;
1614 uint64_t features_supported;
1615 int ret = 0;
1616
1617 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1618 case IP_VERSION(11, 0, 7):
1619 case IP_VERSION(11, 0, 11):
1620 case IP_VERSION(11, 5, 0):
1621 case IP_VERSION(11, 5, 2):
1622 case IP_VERSION(11, 0, 12):
1623 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1624 dev_info(adev->dev, "dpm has been enabled\n");
1625 ret = smu_system_features_control(smu, true);
1626 if (ret)
1627 dev_err(adev->dev, "Failed system features control!\n");
1628 return ret;
1629 }
1630 break;
1631 default:
1632 break;
1633 }
1634
1635 ret = smu_init_display_count(smu, 0);
1636 if (ret) {
1637 dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1638 return ret;
1639 }
1640
1641 ret = smu_set_driver_table_location(smu);
1642 if (ret) {
1643 dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1644 return ret;
1645 }
1646
1647 /*
1648 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1649 */
1650 ret = smu_set_tool_table_location(smu);
1651 if (ret) {
1652 dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1653 return ret;
1654 }
1655
1656 /*
1657 * Use the SetSystemVirtualDramAddr and DramLogSetDramAddr messages to
1658 * notify the SMC of the memory pool location.
1659 */
1660 ret = smu_notify_memory_pool_location(smu);
1661 if (ret) {
1662 dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1663 return ret;
1664 }
1665
1666 /*
1667 * It is assumed the pptable used before runpm is the same as
1668 * the one used afterwards. Thus, we can reuse the stored
1669 * copy and do not need to set up the pptable again.
1670 */
1671 if (!adev->in_runpm) {
1672 ret = smu_setup_pptable(smu);
1673 if (ret) {
1674 dev_err(adev->dev, "Failed to setup pptable!\n");
1675 return ret;
1676 }
1677 }
1678
1679 /* smu_dump_pptable(smu); */
1680
1681 /*
1682 * With SCPM enabled, PSP is responsible for transferring the PPTable
1683 * (to SMU). Driver involvement is neither needed nor permitted.
1684 */
1685 if (!adev->scpm_enabled) {
1686 /*
1687 * Copy pptable bo in the vram to smc with SMU MSGs such as
1688 * SetDriverDramAddr and TransferTableDram2Smu.
1689 */
1690 ret = smu_write_pptable(smu);
1691 if (ret) {
1692 dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1693 return ret;
1694 }
1695 }
1696
1697 /* issue Run*Btc msg */
1698 ret = smu_run_btc(smu);
1699 if (ret)
1700 return ret;
1701
1702 /* Enable UclkShadow on wbrf supported */
1703 if (smu->wbrf_supported) {
1704 ret = smu_enable_uclk_shadow(smu, true);
1705 if (ret) {
1706 dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1707 return ret;
1708 }
1709 }
1710
1711 /*
1712 * With SCPM enabled, these actions (and the relevant messages) are
1713 * neither needed nor permitted.
1714 */
1715 if (!adev->scpm_enabled) {
1716 ret = smu_feature_set_allowed_mask(smu);
1717 if (ret) {
1718 dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1719 return ret;
1720 }
1721 }
1722
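/*
 * Translate the CAIL link speed/width masks into the encodings PMFW
 * expects: pcie_gen 0..4 maps to Gen1..Gen5, pcie_width 1..7 maps to
 * x1..x32 (see the message-format comment below).
 */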
1723 if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
1724 pcie_gen = 4;
1725 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1726 pcie_gen = 3;
1727 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1728 pcie_gen = 2;
1729 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1730 pcie_gen = 1;
1731 else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1732 pcie_gen = 0;
1733
1734 /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1735 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1736 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
1737 */
1738 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
1739 pcie_width = 7;
1740 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1741 pcie_width = 6;
1742 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1743 pcie_width = 5;
1744 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1745 pcie_width = 4;
1746 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1747 pcie_width = 3;
1748 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1749 pcie_width = 2;
1750 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1751 pcie_width = 1;
1752 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1753 if (ret) {
1754 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1755 return ret;
1756 }
1757
1758 ret = smu_system_features_control(smu, true);
1759 if (ret) {
1760 dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1761 return ret;
1762 }
1763
1764 smu_init_xgmi_plpd_mode(smu);
1765
1766 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1767 if (ret) {
1768 dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1769 return ret;
1770 }
1771 bitmap_copy(feature->supported,
1772 (unsigned long *)&features_supported,
1773 feature->feature_num);
1774
1775 if (!smu_is_dpm_running(smu))
1776 dev_info(adev->dev, "dpm has been disabled\n");
1777
1778 /*
1779 * Set initial values (from vbios) in the dpm tables context, such as
1780 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
1781 * type of clock.
1782 */
1783 ret = smu_set_default_dpm_table(smu);
1784 if (ret) {
1785 dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1786 return ret;
1787 }
1788
1789 ret = smu_get_thermal_temperature_range(smu);
1790 if (ret) {
1791 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1792 return ret;
1793 }
1794
1795 ret = smu_enable_thermal_alert(smu);
1796 if (ret) {
1797 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1798 return ret;
1799 }
1800
1801 ret = smu_notify_display_change(smu);
1802 if (ret) {
1803 dev_err(adev->dev, "Failed to notify display change!\n");
1804 return ret;
1805 }
1806
1807 /*
1808 * Set min deep sleep dce fclk with bootup value from vbios via
1809 * SetMinDeepSleepDcefclk MSG.
1810 */
1811 ret = smu_set_min_dcef_deep_sleep(smu,
1812 smu->smu_table.boot_values.dcefclk / 100);
1813 if (ret) {
1814 dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1815 return ret;
1816 }
1817
1818	 /* Init wbrf support. Properly set up the notifier */
1819 ret = smu_wbrf_init(smu);
1820 if (ret)
1821 dev_err(adev->dev, "Error during wbrf init call\n");
1822
1823 return ret;
1824 }
1825
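/*
 * Bring the SMC up: load the SMU microcode if needed (non-PSP load path on
 * pre-SMU11 parts), verify the firmware status and then confirm the driver
 * interface version matches the firmware.
 */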
1826 static int smu_start_smc_engine(struct smu_context *smu)
1827 {
1828 struct amdgpu_device *adev = smu->adev;
1829 int ret = 0;
1830
1831 if (amdgpu_virt_xgmi_migrate_enabled(adev))
1832 smu_update_gpu_addresses(smu);
1833
1834 smu->smc_fw_state = SMU_FW_INIT;
1835
1836 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1837 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1838 if (smu->ppt_funcs->load_microcode) {
1839 ret = smu->ppt_funcs->load_microcode(smu);
1840 if (ret)
1841 return ret;
1842 }
1843 }
1844 }
1845
1846 if (smu->ppt_funcs->check_fw_status) {
1847 ret = smu->ppt_funcs->check_fw_status(smu);
1848 if (ret) {
1849 dev_err(adev->dev, "SMC is not ready\n");
1850 return ret;
1851 }
1852 }
1853
1854 /*
1855	 * Send the GetDriverIfVersion message and check that the returned value
1856	 * matches the DRIVER_IF_VERSION in the smc header.
1857 */
1858 ret = smu_check_fw_version(smu);
1859 if (ret)
1860 return ret;
1861
1862 return ret;
1863 }
1864
1865 static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1866 {
1867 int i, ret;
1868 struct amdgpu_device *adev = ip_block->adev;
1869 struct smu_context *smu = adev->powerplay.pp_handle;
1870
1871 if (amdgpu_sriov_multi_vf_mode(adev)) {
1872 smu->pm_enabled = false;
1873 return 0;
1874 }
1875
1876 ret = smu_start_smc_engine(smu);
1877 if (ret) {
1878 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1879 return ret;
1880 }
1881
1882 /*
1883 * Check whether wbrf is supported. This needs to be done
1884 * before SMU setup starts since part of SMU configuration
1885 * relies on this.
1886 */
1887 smu_wbrf_support_check(smu);
1888
1889 if (smu->is_apu) {
1890 ret = smu_set_gfx_imu_enable(smu);
1891 if (ret)
1892 return ret;
1893 for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1894 smu_dpm_set_vcn_enable(smu, true, i);
1895 smu_dpm_set_jpeg_enable(smu, true);
1896 smu_dpm_set_vpe_enable(smu, true);
1897 smu_dpm_set_umsch_mm_enable(smu, true);
1898 smu_set_mall_enable(smu);
1899 smu_set_gfx_cgpg(smu, true);
1900 }
1901
1902 if (!smu->pm_enabled)
1903 return 0;
1904
1905 ret = smu_get_driver_allowed_feature_mask(smu);
1906 if (ret)
1907 return ret;
1908
1909 ret = smu_smc_hw_setup(smu);
1910 if (ret) {
1911 dev_err(adev->dev, "Failed to setup smc hw!\n");
1912 return ret;
1913 }
1914
1915 /*
1916	 * Maximum sustainable clock retrieval is done here considering:
1917	 * 1. It is not needed on resume (from S3).
1918	 * 2. DAL settings come between .hw_init and .late_init of SMU.
1919	 *    And DAL needs to know the maximum sustainable clocks. Thus
1920	 *    it cannot be put in .late_init().
1921 */
1922 ret = smu_init_max_sustainable_clocks(smu);
1923 if (ret) {
1924 dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1925 return ret;
1926 }
1927
1928 adev->pm.dpm_enabled = true;
1929
1930 dev_info(adev->dev, "SMU is initialized successfully!\n");
1931
1932 return 0;
1933 }
1934
1935 static int smu_disable_dpms(struct smu_context *smu)
1936 {
1937 struct amdgpu_device *adev = smu->adev;
1938 int ret = 0;
1939 bool use_baco = !smu->is_apu &&
1940 ((amdgpu_in_reset(adev) &&
1941 (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1942 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1943
1944 /*
1945	 * For SMU 13.0.0, 13.0.7, 13.0.10, 14.0.2 and 14.0.3, PMFW will handle the DPM
1946	 * features (disablement or others) properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1947 */
1948 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1949 case IP_VERSION(13, 0, 0):
1950 case IP_VERSION(13, 0, 7):
1951 case IP_VERSION(13, 0, 10):
1952 case IP_VERSION(14, 0, 2):
1953 case IP_VERSION(14, 0, 3):
1954 return 0;
1955 default:
1956 break;
1957 }
1958
1959 /*
1960 * For custom pptable uploading, skip the DPM features
1961 * disable process on Navi1x ASICs.
1962	 *   - The gfx-related features are under the control of
1963	 *     RLC on those ASICs. RLC reinitialization would be
1964	 *     needed to re-enable them, which would take much
1965	 *     more effort.
1966 *
1967 * - SMU firmware can handle the DPM reenablement
1968 * properly.
1969 */
1970 if (smu->uploading_custom_pp_table) {
1971 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1972 case IP_VERSION(11, 0, 0):
1973 case IP_VERSION(11, 0, 5):
1974 case IP_VERSION(11, 0, 9):
1975 case IP_VERSION(11, 0, 7):
1976 case IP_VERSION(11, 0, 11):
1977 case IP_VERSION(11, 5, 0):
1978 case IP_VERSION(11, 5, 2):
1979 case IP_VERSION(11, 0, 12):
1980 case IP_VERSION(11, 0, 13):
1981 return 0;
1982 default:
1983 break;
1984 }
1985 }
1986
1987 /*
1988	 * For Sienna_Cichlid and the other ASICs listed below, PMFW will handle the
1989	 * feature disablement properly on BACO entry. Driver involvement is unnecessary.
1990 */
1991 if (use_baco) {
1992 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1993 case IP_VERSION(11, 0, 7):
1994 case IP_VERSION(11, 0, 0):
1995 case IP_VERSION(11, 0, 5):
1996 case IP_VERSION(11, 0, 9):
1997 case IP_VERSION(13, 0, 7):
1998 return 0;
1999 default:
2000 break;
2001 }
2002 }
2003
2004 /*
2005	 * For GFX11 and subsequent APUs, PMFW will handle the feature disablement properly
2006	 * for the gpu reset and S0i3 cases. Driver involvement is unnecessary.
2007 */
2008 if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
2009 smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
2010 return 0;
2011
2012 /*
2013 * For gpu reset, runpm and hibernation through BACO,
2014	 * the BACO feature has to be kept enabled.
2015 */
2016 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
2017 ret = smu_disable_all_features_with_exception(smu,
2018 SMU_FEATURE_BACO_BIT);
2019 if (ret)
2020 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
2021 } else {
2022 /* DisableAllSmuFeatures message is not permitted with SCPM enabled */
2023 if (!adev->scpm_enabled) {
2024 ret = smu_system_features_control(smu, false);
2025 if (ret)
2026 dev_err(adev->dev, "Failed to disable smu features.\n");
2027 }
2028 }
2029
2030	 /* Notify SMU that RLC is going to be off and stop RLC and SMU interaction;
2031	  * otherwise SMU will hang while interacting with RLC if RLC is halted.
2032	  * This is a workaround for the Vangogh ASIC which fixes the SMU hang issue.
2033 */
2034 ret = smu_notify_rlc_state(smu, false);
2035 if (ret) {
2036	 dev_err(adev->dev, "Failed to notify RLC status!\n");
2037 return ret;
2038 }
2039
2040 if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
2041 !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
2042 !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
2043 adev->gfx.rlc.funcs->stop(adev);
2044
2045 return ret;
2046 }
2047
2048 static int smu_smc_hw_cleanup(struct smu_context *smu)
2049 {
2050 struct amdgpu_device *adev = smu->adev;
2051 int ret = 0;
2052
2053 smu_wbrf_fini(smu);
2054
2055 cancel_work_sync(&smu->throttling_logging_work);
2056 cancel_work_sync(&smu->interrupt_work);
2057
2058 ret = smu_disable_thermal_alert(smu);
2059 if (ret) {
2060	 dev_err(adev->dev, "Failed to disable thermal alert!\n");
2061 return ret;
2062 }
2063
2064 cancel_delayed_work_sync(&smu->swctf_delayed_work);
2065
2066 ret = smu_disable_dpms(smu);
2067 if (ret) {
2068	 dev_err(adev->dev, "Failed to disable dpm features!\n");
2069 return ret;
2070 }
2071
2072 return 0;
2073 }
2074
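/*
 * On a plain driver unload (no runpm, no suspend, no reset in progress) of a
 * headless SMU 13.0.10 part, set the MP1 state to UNLOAD.
 */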
2075 static int smu_reset_mp1_state(struct smu_context *smu)
2076 {
2077 struct amdgpu_device *adev = smu->adev;
2078 int ret = 0;
2079
2080 if ((!adev->in_runpm) && (!adev->in_suspend) &&
2081 (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2082 IP_VERSION(13, 0, 10) &&
2083 !amdgpu_device_has_display_hardware(adev))
2084 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2085
2086 return ret;
2087 }
2088
2089 static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2090 {
2091 struct amdgpu_device *adev = ip_block->adev;
2092 struct smu_context *smu = adev->powerplay.pp_handle;
2093 int i, ret;
2094
2095 if (amdgpu_sriov_multi_vf_mode(adev))
2096 return 0;
2097
2098 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2099 smu_dpm_set_vcn_enable(smu, false, i);
2100 adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
2101 }
2102 smu_dpm_set_jpeg_enable(smu, false);
2103 adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2104 smu_dpm_set_vpe_enable(smu, false);
2105 smu_dpm_set_umsch_mm_enable(smu, false);
2106
2107 if (!smu->pm_enabled)
2108 return 0;
2109
2110 adev->pm.dpm_enabled = false;
2111
2112 ret = smu_smc_hw_cleanup(smu);
2113 if (ret)
2114 return ret;
2115
2116 ret = smu_reset_mp1_state(smu);
2117 if (ret)
2118 return ret;
2119
2120 return 0;
2121 }
2122
2123 static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2124 {
2125 struct amdgpu_device *adev = ip_block->adev;
2126 struct smu_context *smu = adev->powerplay.pp_handle;
2127
2128 kfree(smu);
2129 }
2130
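/* Full SMU reset: tear down the SMC block, then run hw_init and late_init again. */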
2131 static int smu_reset(struct smu_context *smu)
2132 {
2133 struct amdgpu_device *adev = smu->adev;
2134 struct amdgpu_ip_block *ip_block;
2135 int ret;
2136
2137 ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2138 if (!ip_block)
2139 return -EINVAL;
2140
2141 ret = smu_hw_fini(ip_block);
2142 if (ret)
2143 return ret;
2144
2145 ret = smu_hw_init(ip_block);
2146 if (ret)
2147 return ret;
2148
2149 ret = smu_late_init(ip_block);
2150 if (ret)
2151 return ret;
2152
2153 return 0;
2154 }
2155
2156 static int smu_suspend(struct amdgpu_ip_block *ip_block)
2157 {
2158 struct amdgpu_device *adev = ip_block->adev;
2159 struct smu_context *smu = adev->powerplay.pp_handle;
2160 int ret;
2161 uint64_t count;
2162
2163 if (amdgpu_sriov_multi_vf_mode(adev))
2164 return 0;
2165
2166 if (!smu->pm_enabled)
2167 return 0;
2168
2169 adev->pm.dpm_enabled = false;
2170
2171 ret = smu_smc_hw_cleanup(smu);
2172 if (ret)
2173 return ret;
2174
2175 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2176
2177 smu_set_gfx_cgpg(smu, false);
2178
2179 /*
2180	 * pmfw resets the gfxoff entry count when the device is suspended, so we save
2181	 * the last value here and restore it on resume to keep it consistent
2182 */
2183 ret = smu_get_entrycount_gfxoff(smu, &count);
2184 if (!ret)
2185 adev->gfx.gfx_off_entrycount = count;
2186
2187 /* clear this on suspend so it will get reprogrammed on resume */
2188 smu->workload_mask = 0;
2189
2190 return 0;
2191 }
2192
2193 static int smu_resume(struct amdgpu_ip_block *ip_block)
2194 {
2195 int ret;
2196 struct amdgpu_device *adev = ip_block->adev;
2197 struct smu_context *smu = adev->powerplay.pp_handle;
2198
2199 if (amdgpu_sriov_multi_vf_mode(adev))
2200 return 0;
2201
2202 if (!smu->pm_enabled)
2203 return 0;
2204
2205 dev_info(adev->dev, "SMU is resuming...\n");
2206
2207 ret = smu_start_smc_engine(smu);
2208 if (ret) {
2209 dev_err(adev->dev, "SMC engine is not correctly up!\n");
2210 return ret;
2211 }
2212
2213 ret = smu_smc_hw_setup(smu);
2214 if (ret) {
2215 dev_err(adev->dev, "Failed to setup smc hw!\n");
2216 return ret;
2217 }
2218
2219 ret = smu_set_gfx_imu_enable(smu);
2220 if (ret)
2221 return ret;
2222
2223 smu_set_gfx_cgpg(smu, true);
2224
2225 smu->disable_uclk_switch = 0;
2226
2227 adev->pm.dpm_enabled = true;
2228
2229 dev_info(adev->dev, "SMU is resumed successfully!\n");
2230
2231 return 0;
2232 }
2233
2234 static int smu_display_configuration_change(void *handle,
2235 const struct amd_pp_display_configuration *display_config)
2236 {
2237 struct smu_context *smu = handle;
2238
2239 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2240 return -EOPNOTSUPP;
2241
2242 if (!display_config)
2243 return -EINVAL;
2244
2245 smu_set_min_dcef_deep_sleep(smu,
2246 display_config->min_dcef_deep_sleep_set_clk / 100);
2247
2248 return 0;
2249 }
2250
2251 static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2252 enum amd_clockgating_state state)
2253 {
2254 return 0;
2255 }
2256
2257 static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
2258 enum amd_powergating_state state)
2259 {
2260 return 0;
2261 }
2262
2263 static int smu_enable_umd_pstate(void *handle,
2264 enum amd_dpm_forced_level *level)
2265 {
2266 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2267 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2268 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2269 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2270
2271 struct smu_context *smu = (struct smu_context*)(handle);
2272 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2273
2274 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2275 return -EINVAL;
2276
2277 if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2278	 /* enter umd pstate, save current level, disable gfx cg */
2279 if (*level & profile_mode_mask) {
2280 smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2281 smu_gpo_control(smu, false);
2282 smu_gfx_ulv_control(smu, false);
2283 smu_deep_sleep_control(smu, false);
2284 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2285 }
2286 } else {
2287	 /* exit umd pstate, restore level, enable gfx cg */
2288 if (!(*level & profile_mode_mask)) {
2289 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2290 *level = smu_dpm_ctx->saved_dpm_level;
2291 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2292 smu_deep_sleep_control(smu, true);
2293 smu_gfx_ulv_control(smu, true);
2294 smu_gpo_control(smu, true);
2295 }
2296 }
2297
2298 return 0;
2299 }
2300
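/*
 * Recompute the workload mask from the per-profile refcounts and hand it to
 * the ppt backend. Nothing is sent if the mask is unchanged.
 */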
2301 static int smu_bump_power_profile_mode(struct smu_context *smu,
2302 long *custom_params,
2303 u32 custom_params_max_idx)
2304 {
2305 u32 workload_mask = 0;
2306 int i, ret = 0;
2307
2308 for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2309 if (smu->workload_refcount[i])
2310 workload_mask |= 1 << i;
2311 }
2312
2313 if (smu->workload_mask == workload_mask)
2314 return 0;
2315
2316 if (smu->ppt_funcs->set_power_profile_mode)
2317 ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2318 custom_params,
2319 custom_params_max_idx);
2320
2321 if (!ret)
2322 smu->workload_mask = workload_mask;
2323
2324 return ret;
2325 }
2326
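/* Reference counting helpers for the per-profile workload requests. */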
2327 static void smu_power_profile_mode_get(struct smu_context *smu,
2328 enum PP_SMC_POWER_PROFILE profile_mode)
2329 {
2330 smu->workload_refcount[profile_mode]++;
2331 }
2332
2333 static void smu_power_profile_mode_put(struct smu_context *smu,
2334 enum PP_SMC_POWER_PROFILE profile_mode)
2335 {
2336 if (smu->workload_refcount[profile_mode])
2337 smu->workload_refcount[profile_mode]--;
2338 }
2339
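/*
 * Apply display config changes and clock adjust rules, switch the
 * performance level if it changed, and refresh the power profile unless the
 * level is manual or perf-deterministic.
 */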
2340 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2341 enum amd_dpm_forced_level level,
2342 bool skip_display_settings)
2343 {
2344 int ret = 0;
2345 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2346
2347 if (!skip_display_settings) {
2348 ret = smu_display_config_changed(smu);
2349 if (ret) {
2350 dev_err(smu->adev->dev, "Failed to change display config!");
2351 return ret;
2352 }
2353 }
2354
2355 ret = smu_apply_clocks_adjust_rules(smu);
2356 if (ret) {
2357 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2358 return ret;
2359 }
2360
2361 if (!skip_display_settings) {
2362 ret = smu_notify_smc_display_config(smu);
2363 if (ret) {
2364 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2365 return ret;
2366 }
2367 }
2368
2369 if (smu_dpm_ctx->dpm_level != level) {
2370 ret = smu_asic_set_performance_level(smu, level);
2371 if (ret) {
2372 if (ret == -EOPNOTSUPP)
2373 dev_info(smu->adev->dev, "set performance level %d not supported",
2374 level);
2375 else
2376 dev_err(smu->adev->dev, "Failed to set performance level %d",
2377 level);
2378 return ret;
2379 }
2380
2381 /* update the saved copy */
2382 smu_dpm_ctx->dpm_level = level;
2383 }
2384
2385 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2386 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2387 smu_bump_power_profile_mode(smu, NULL, 0);
2388
2389 return ret;
2390 }
2391
2392 static int smu_handle_task(struct smu_context *smu,
2393 enum amd_dpm_forced_level level,
2394 enum amd_pp_task task_id)
2395 {
2396 int ret = 0;
2397
2398 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2399 return -EOPNOTSUPP;
2400
2401 switch (task_id) {
2402 case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2403 ret = smu_pre_display_config_changed(smu);
2404 if (ret)
2405 return ret;
2406 ret = smu_adjust_power_state_dynamic(smu, level, false);
2407 break;
2408 case AMD_PP_TASK_COMPLETE_INIT:
2409 ret = smu_adjust_power_state_dynamic(smu, level, true);
2410 break;
2411 case AMD_PP_TASK_READJUST_POWER_STATE:
2412 ret = smu_adjust_power_state_dynamic(smu, level, true);
2413 break;
2414 default:
2415 break;
2416 }
2417
2418 return ret;
2419 }
2420
2421 static int smu_handle_dpm_task(void *handle,
2422 enum amd_pp_task task_id,
2423 enum amd_pm_state_type *user_state)
2424 {
2425 struct smu_context *smu = handle;
2426 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2427
2428 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2429
2430 }
2431
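/*
 * Take or drop a reference on the requested profile and rebuild the active
 * workload mask; on failure the refcount change is rolled back. The actual
 * switch is deferred while workload handling is paused.
 */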
2432 static int smu_switch_power_profile(void *handle,
2433 enum PP_SMC_POWER_PROFILE type,
2434 bool enable)
2435 {
2436 struct smu_context *smu = handle;
2437 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2438 int ret;
2439
2440 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2441 return -EOPNOTSUPP;
2442
2443	 if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
2444 return -EINVAL;
2445
2446 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2447 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2448 if (enable)
2449 smu_power_profile_mode_get(smu, type);
2450 else
2451 smu_power_profile_mode_put(smu, type);
2452 /* don't switch the active workload when paused */
2453 if (smu->pause_workload)
2454 ret = 0;
2455 else
2456 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2457 if (ret) {
2458 if (enable)
2459 smu_power_profile_mode_put(smu, type);
2460 else
2461 smu_power_profile_mode_get(smu, type);
2462 return ret;
2463 }
2464 }
2465
2466 return 0;
2467 }
2468
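/*
 * Pause or unpause workload profile handling. While paused, the bootup
 * default profile is forced; on unpause the refcounted mask is reapplied.
 */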
2469 static int smu_pause_power_profile(void *handle,
2470 bool pause)
2471 {
2472 struct smu_context *smu = handle;
2473 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2474 u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
2475 int ret;
2476
2477 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2478 return -EOPNOTSUPP;
2479
2480 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2481 smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2482 smu->pause_workload = pause;
2483
2484 /* force to bootup default profile */
2485 if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
2486 ret = smu->ppt_funcs->set_power_profile_mode(smu,
2487 workload_mask,
2488 NULL,
2489 0);
2490 else
2491 ret = smu_bump_power_profile_mode(smu, NULL, 0);
2492 return ret;
2493 }
2494
2495 return 0;
2496 }
2497
2498 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2499 {
2500 struct smu_context *smu = handle;
2501 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2502
2503 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2504 return -EOPNOTSUPP;
2505
2506 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2507 return -EINVAL;
2508
2509 return smu_dpm_ctx->dpm_level;
2510 }
2511
2512 static int smu_force_performance_level(void *handle,
2513 enum amd_dpm_forced_level level)
2514 {
2515 struct smu_context *smu = handle;
2516 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2517 int ret = 0;
2518
2519 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2520 return -EOPNOTSUPP;
2521
2522 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2523 return -EINVAL;
2524
2525 ret = smu_enable_umd_pstate(smu, &level);
2526 if (ret)
2527 return ret;
2528
2529 ret = smu_handle_task(smu, level,
2530 AMD_PP_TASK_READJUST_POWER_STATE);
2531
2532 /* reset user dpm clock state */
2533 if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2534 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2535 smu->user_dpm_profile.clk_dependency = 0;
2536 }
2537
2538 return ret;
2539 }
2540
2541 static int smu_set_display_count(void *handle, uint32_t count)
2542 {
2543 struct smu_context *smu = handle;
2544
2545 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2546 return -EOPNOTSUPP;
2547
2548 return smu_init_display_count(smu, count);
2549 }
2550
2551 static int smu_force_smuclk_levels(struct smu_context *smu,
2552 enum smu_clk_type clk_type,
2553 uint32_t mask)
2554 {
2555 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2556 int ret = 0;
2557
2558 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2559 return -EOPNOTSUPP;
2560
2561 if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2562 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2563 return -EINVAL;
2564 }
2565
2566 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2567 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2568 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2569 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2570 smu_set_user_clk_dependencies(smu, clk_type);
2571 }
2572 }
2573
2574 return ret;
2575 }
2576
2577 static int smu_force_ppclk_levels(void *handle,
2578 enum pp_clock_type type,
2579 uint32_t mask)
2580 {
2581 struct smu_context *smu = handle;
2582 enum smu_clk_type clk_type;
2583
2584 switch (type) {
2585 case PP_SCLK:
2586 clk_type = SMU_SCLK; break;
2587 case PP_MCLK:
2588 clk_type = SMU_MCLK; break;
2589 case PP_PCIE:
2590 clk_type = SMU_PCIE; break;
2591 case PP_SOCCLK:
2592 clk_type = SMU_SOCCLK; break;
2593 case PP_FCLK:
2594 clk_type = SMU_FCLK; break;
2595 case PP_DCEFCLK:
2596 clk_type = SMU_DCEFCLK; break;
2597 case PP_VCLK:
2598 clk_type = SMU_VCLK; break;
2599 case PP_VCLK1:
2600 clk_type = SMU_VCLK1; break;
2601 case PP_DCLK:
2602 clk_type = SMU_DCLK; break;
2603 case PP_DCLK1:
2604 clk_type = SMU_DCLK1; break;
2605 case OD_SCLK:
2606 clk_type = SMU_OD_SCLK; break;
2607 case OD_MCLK:
2608 clk_type = SMU_OD_MCLK; break;
2609 case OD_VDDC_CURVE:
2610 clk_type = SMU_OD_VDDC_CURVE; break;
2611 case OD_RANGE:
2612 clk_type = SMU_OD_RANGE; break;
2613 default:
2614 return -EINVAL;
2615 }
2616
2617 return smu_force_smuclk_levels(smu, clk_type, mask);
2618 }
2619
2620 /*
2621	 * On system suspend or reset, the dpm_enabled
2622	 * flag will be cleared, so that the SMU services which
2623	 * are then unsupported will be gated.
2624	 * However, the mp1 state setting should still be allowed
2625	 * even with dpm_enabled cleared.
2626 */
2627 static int smu_set_mp1_state(void *handle,
2628 enum pp_mp1_state mp1_state)
2629 {
2630 struct smu_context *smu = handle;
2631 int ret = 0;
2632
2633 if (!smu->pm_enabled)
2634 return -EOPNOTSUPP;
2635
2636 if (smu->ppt_funcs &&
2637 smu->ppt_funcs->set_mp1_state)
2638 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2639
2640 return ret;
2641 }
2642
2643 static int smu_set_df_cstate(void *handle,
2644 enum pp_df_cstate state)
2645 {
2646 struct smu_context *smu = handle;
2647 int ret = 0;
2648
2649 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2650 return -EOPNOTSUPP;
2651
2652 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2653 return 0;
2654
2655 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2656 if (ret)
2657 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2658
2659 return ret;
2660 }
2661
2662 int smu_write_watermarks_table(struct smu_context *smu)
2663 {
2664 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2665 return -EOPNOTSUPP;
2666
2667 return smu_set_watermarks_table(smu, NULL);
2668 }
2669
2670 static int smu_set_watermarks_for_clock_ranges(void *handle,
2671 struct pp_smu_wm_range_sets *clock_ranges)
2672 {
2673 struct smu_context *smu = handle;
2674
2675 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2676 return -EOPNOTSUPP;
2677
2678 if (smu->disable_watermark)
2679 return 0;
2680
2681 return smu_set_watermarks_table(smu, clock_ranges);
2682 }
2683
2684 int smu_set_ac_dc(struct smu_context *smu)
2685 {
2686 int ret = 0;
2687
2688 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2689 return -EOPNOTSUPP;
2690
2691 /* controlled by firmware */
2692 if (smu->dc_controlled_by_gpio)
2693 return 0;
2694
2695 ret = smu_set_power_source(smu,
2696 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2697 SMU_POWER_SOURCE_DC);
2698 if (ret)
2699 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2700 smu->adev->pm.ac_power ? "AC" : "DC");
2701
2702 return ret;
2703 }
2704
2705 const struct amd_ip_funcs smu_ip_funcs = {
2706 .name = "smu",
2707 .early_init = smu_early_init,
2708 .late_init = smu_late_init,
2709 .sw_init = smu_sw_init,
2710 .sw_fini = smu_sw_fini,
2711 .hw_init = smu_hw_init,
2712 .hw_fini = smu_hw_fini,
2713 .late_fini = smu_late_fini,
2714 .suspend = smu_suspend,
2715 .resume = smu_resume,
2716 .is_idle = NULL,
2717 .check_soft_reset = NULL,
2718 .wait_for_idle = NULL,
2719 .soft_reset = NULL,
2720 .set_clockgating_state = smu_set_clockgating_state,
2721 .set_powergating_state = smu_set_powergating_state,
2722 };
2723
2724 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2725 .type = AMD_IP_BLOCK_TYPE_SMC,
2726 .major = 11,
2727 .minor = 0,
2728 .rev = 0,
2729 .funcs = &smu_ip_funcs,
2730 };
2731
2732 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2733 .type = AMD_IP_BLOCK_TYPE_SMC,
2734 .major = 12,
2735 .minor = 0,
2736 .rev = 0,
2737 .funcs = &smu_ip_funcs,
2738 };
2739
2740 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2741 .type = AMD_IP_BLOCK_TYPE_SMC,
2742 .major = 13,
2743 .minor = 0,
2744 .rev = 0,
2745 .funcs = &smu_ip_funcs,
2746 };
2747
2748 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2749 .type = AMD_IP_BLOCK_TYPE_SMC,
2750 .major = 14,
2751 .minor = 0,
2752 .rev = 0,
2753 .funcs = &smu_ip_funcs,
2754 };
2755
2756 static int smu_load_microcode(void *handle)
2757 {
2758 struct smu_context *smu = handle;
2759 struct amdgpu_device *adev = smu->adev;
2760 int ret = 0;
2761
2762 if (!smu->pm_enabled)
2763 return -EOPNOTSUPP;
2764
2765	 /* This should be used for non-PSP loading */
2766 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2767 return 0;
2768
2769 if (smu->ppt_funcs->load_microcode) {
2770 ret = smu->ppt_funcs->load_microcode(smu);
2771 if (ret) {
2772 dev_err(adev->dev, "Load microcode failed\n");
2773 return ret;
2774 }
2775 }
2776
2777 if (smu->ppt_funcs->check_fw_status) {
2778 ret = smu->ppt_funcs->check_fw_status(smu);
2779 if (ret) {
2780 dev_err(adev->dev, "SMC is not ready\n");
2781 return ret;
2782 }
2783 }
2784
2785 return ret;
2786 }
2787
2788 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2789 {
2790 int ret = 0;
2791
2792 if (smu->ppt_funcs->set_gfx_cgpg)
2793 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2794
2795 return ret;
2796 }
2797
2798 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2799 {
2800 struct smu_context *smu = handle;
2801 int ret = 0;
2802
2803 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2804 return -EOPNOTSUPP;
2805
2806 if (!smu->ppt_funcs->set_fan_speed_rpm)
2807 return -EOPNOTSUPP;
2808
2809 if (speed == U32_MAX)
2810 return -EINVAL;
2811
2812 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2813 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2814 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2815 smu->user_dpm_profile.fan_speed_rpm = speed;
2816
2817 /* Override custom PWM setting as they cannot co-exist */
2818 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2819 smu->user_dpm_profile.fan_speed_pwm = 0;
2820 }
2821
2822 return ret;
2823 }
2824
2825 /**
2826 * smu_get_power_limit - Request one of the SMU Power Limits
2827 *
2828 * @handle: pointer to smu context
2829 * @limit: requested limit is written back to this variable
2830	 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
2831	 * @pp_power_type: &pp_power_type describing the type of power limit
2832 * Return: 0 on success, <0 on error
2833 *
2834 */
2835 int smu_get_power_limit(void *handle,
2836 uint32_t *limit,
2837 enum pp_power_limit_level pp_limit_level,
2838 enum pp_power_type pp_power_type)
2839 {
2840 struct smu_context *smu = handle;
2841 struct amdgpu_device *adev = smu->adev;
2842 enum smu_ppt_limit_level limit_level;
2843 uint32_t limit_type;
2844 int ret = 0;
2845
2846 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2847 return -EOPNOTSUPP;
2848
2849 switch (pp_power_type) {
2850 case PP_PWR_TYPE_SUSTAINED:
2851 limit_type = SMU_DEFAULT_PPT_LIMIT;
2852 break;
2853 case PP_PWR_TYPE_FAST:
2854 limit_type = SMU_FAST_PPT_LIMIT;
2855 break;
2856 default:
2857 return -EOPNOTSUPP;
2858 }
2859
2860 switch (pp_limit_level) {
2861 case PP_PWR_LIMIT_CURRENT:
2862 limit_level = SMU_PPT_LIMIT_CURRENT;
2863 break;
2864 case PP_PWR_LIMIT_DEFAULT:
2865 limit_level = SMU_PPT_LIMIT_DEFAULT;
2866 break;
2867 case PP_PWR_LIMIT_MAX:
2868 limit_level = SMU_PPT_LIMIT_MAX;
2869 break;
2870 case PP_PWR_LIMIT_MIN:
2871 limit_level = SMU_PPT_LIMIT_MIN;
2872 break;
2873 default:
2874 return -EOPNOTSUPP;
2875 }
2876
2877 if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2878 if (smu->ppt_funcs->get_ppt_limit)
2879 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2880 } else {
2881 switch (limit_level) {
2882 case SMU_PPT_LIMIT_CURRENT:
2883 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2884 case IP_VERSION(13, 0, 2):
2885 case IP_VERSION(13, 0, 6):
2886 case IP_VERSION(13, 0, 12):
2887 case IP_VERSION(13, 0, 14):
2888 case IP_VERSION(11, 0, 7):
2889 case IP_VERSION(11, 0, 11):
2890 case IP_VERSION(11, 0, 12):
2891 case IP_VERSION(11, 0, 13):
2892 ret = smu_get_asic_power_limits(smu,
2893 &smu->current_power_limit,
2894 NULL, NULL, NULL);
2895 break;
2896 default:
2897 break;
2898 }
2899 *limit = smu->current_power_limit;
2900 break;
2901 case SMU_PPT_LIMIT_DEFAULT:
2902 *limit = smu->default_power_limit;
2903 break;
2904 case SMU_PPT_LIMIT_MAX:
2905 *limit = smu->max_power_limit;
2906 break;
2907 case SMU_PPT_LIMIT_MIN:
2908 *limit = smu->min_power_limit;
2909 break;
2910 default:
2911 return -EINVAL;
2912 }
2913 }
2914
2915 return ret;
2916 }
2917
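/*
 * The limit argument packs the limit type in its upper 8 bits and the limit
 * value in the lower 24 bits; a value of 0 re-applies the current power limit.
 */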
2918 static int smu_set_power_limit(void *handle, uint32_t limit)
2919 {
2920 struct smu_context *smu = handle;
2921 uint32_t limit_type = limit >> 24;
2922 int ret = 0;
2923
2924 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2925 return -EOPNOTSUPP;
2926
2927 limit &= (1<<24)-1;
2928 if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2929 if (smu->ppt_funcs->set_power_limit)
2930 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2931
2932 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2933 dev_err(smu->adev->dev,
2934 "New power limit (%d) is out of range [%d,%d]\n",
2935 limit, smu->min_power_limit, smu->max_power_limit);
2936 return -EINVAL;
2937 }
2938
2939 if (!limit)
2940 limit = smu->current_power_limit;
2941
2942 if (smu->ppt_funcs->set_power_limit) {
2943 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2944 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2945 smu->user_dpm_profile.power_limit = limit;
2946 }
2947
2948 return ret;
2949 }
2950
2951 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2952 {
2953 int ret = 0;
2954
2955 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2956 return -EOPNOTSUPP;
2957
2958 if (smu->ppt_funcs->print_clk_levels)
2959 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2960
2961 return ret;
2962 }
2963
2964 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2965 {
2966 enum smu_clk_type clk_type;
2967
2968 switch (type) {
2969 case PP_SCLK:
2970 clk_type = SMU_SCLK; break;
2971 case PP_MCLK:
2972 clk_type = SMU_MCLK; break;
2973 case PP_PCIE:
2974 clk_type = SMU_PCIE; break;
2975 case PP_SOCCLK:
2976 clk_type = SMU_SOCCLK; break;
2977 case PP_FCLK:
2978 clk_type = SMU_FCLK; break;
2979 case PP_DCEFCLK:
2980 clk_type = SMU_DCEFCLK; break;
2981 case PP_VCLK:
2982 clk_type = SMU_VCLK; break;
2983 case PP_VCLK1:
2984 clk_type = SMU_VCLK1; break;
2985 case PP_DCLK:
2986 clk_type = SMU_DCLK; break;
2987 case PP_DCLK1:
2988 clk_type = SMU_DCLK1; break;
2989 case PP_ISPICLK:
2990 clk_type = SMU_ISPICLK;
2991 break;
2992 case PP_ISPXCLK:
2993 clk_type = SMU_ISPXCLK;
2994 break;
2995 case OD_SCLK:
2996 clk_type = SMU_OD_SCLK; break;
2997 case OD_MCLK:
2998 clk_type = SMU_OD_MCLK; break;
2999 case OD_VDDC_CURVE:
3000 clk_type = SMU_OD_VDDC_CURVE; break;
3001 case OD_RANGE:
3002 clk_type = SMU_OD_RANGE; break;
3003 case OD_VDDGFX_OFFSET:
3004 clk_type = SMU_OD_VDDGFX_OFFSET; break;
3005 case OD_CCLK:
3006 clk_type = SMU_OD_CCLK; break;
3007 case OD_FAN_CURVE:
3008 clk_type = SMU_OD_FAN_CURVE; break;
3009 case OD_ACOUSTIC_LIMIT:
3010 clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
3011 case OD_ACOUSTIC_TARGET:
3012 clk_type = SMU_OD_ACOUSTIC_TARGET; break;
3013 case OD_FAN_TARGET_TEMPERATURE:
3014 clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
3015 case OD_FAN_MINIMUM_PWM:
3016 clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
3017 case OD_FAN_ZERO_RPM_ENABLE:
3018 clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
3019 case OD_FAN_ZERO_RPM_STOP_TEMP:
3020 clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
3021 default:
3022 clk_type = SMU_CLK_COUNT; break;
3023 }
3024
3025 return clk_type;
3026 }
3027
3028 static int smu_print_ppclk_levels(void *handle,
3029 enum pp_clock_type type,
3030 char *buf)
3031 {
3032 struct smu_context *smu = handle;
3033 enum smu_clk_type clk_type;
3034
3035 clk_type = smu_convert_to_smuclk(type);
3036 if (clk_type == SMU_CLK_COUNT)
3037 return -EINVAL;
3038
3039 return smu_print_smuclk_levels(smu, clk_type, buf);
3040 }
3041
3042 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
3043 {
3044 struct smu_context *smu = handle;
3045 enum smu_clk_type clk_type;
3046
3047 clk_type = smu_convert_to_smuclk(type);
3048 if (clk_type == SMU_CLK_COUNT)
3049 return -EINVAL;
3050
3051 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3052 return -EOPNOTSUPP;
3053
3054 if (!smu->ppt_funcs->emit_clk_levels)
3055 return -ENOENT;
3056
3057 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
3058
3059 }
3060
3061 static int smu_od_edit_dpm_table(void *handle,
3062 enum PP_OD_DPM_TABLE_COMMAND type,
3063 long *input, uint32_t size)
3064 {
3065 struct smu_context *smu = handle;
3066 int ret = 0;
3067
3068 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3069 return -EOPNOTSUPP;
3070
3071 if (smu->ppt_funcs->od_edit_dpm_table) {
3072 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
3073 }
3074
3075 return ret;
3076 }
3077
3078 static int smu_read_sensor(void *handle,
3079 int sensor,
3080 void *data,
3081 int *size_arg)
3082 {
3083 struct smu_context *smu = handle;
3084 struct amdgpu_device *adev = smu->adev;
3085 struct smu_umd_pstate_table *pstate_table =
3086 &smu->pstate_table;
3087 int i, ret = 0;
3088 uint32_t *size, size_val;
3089
3090 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3091 return -EOPNOTSUPP;
3092
3093 if (!data || !size_arg)
3094 return -EINVAL;
3095
3096 size_val = *size_arg;
3097 size = &size_val;
3098
3099 if (smu->ppt_funcs->read_sensor)
3100 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
3101 goto unlock;
3102
3103 switch (sensor) {
3104 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3105 *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3106 *size = 4;
3107 break;
3108 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3109 *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3110 *size = 4;
3111 break;
3112 case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3113 *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3114 *size = 4;
3115 break;
3116 case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3117 *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3118 *size = 4;
3119 break;
3120 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3121 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3122 *size = 8;
3123 break;
3124 case AMDGPU_PP_SENSOR_UVD_POWER:
3125 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3126 *size = 4;
3127 break;
3128 case AMDGPU_PP_SENSOR_VCE_POWER:
3129 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3130 *size = 4;
3131 break;
3132 case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3133 *(uint32_t *)data = 0;
3134 for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
3135 if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
3136 *(uint32_t *)data = 1;
3137 break;
3138 }
3139 }
3140 *size = 4;
3141 break;
3142 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3143 *(uint32_t *)data = 0;
3144 *size = 4;
3145 break;
3146 default:
3147 *size = 0;
3148 ret = -EOPNOTSUPP;
3149 break;
3150 }
3151
3152 unlock:
3153	 /* copy the (uint32_t) size back into the int-sized out parameter */
3154 *size_arg = size_val;
3155
3156 return ret;
3157 }
3158
3159 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
3160 {
3161 int ret = -EOPNOTSUPP;
3162 struct smu_context *smu = handle;
3163
3164 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3165 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3166
3167 return ret;
3168 }
3169
3170 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3171 {
3172 int ret = -EOPNOTSUPP;
3173 struct smu_context *smu = handle;
3174
3175 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3176 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3177
3178 return ret;
3179 }
3180
3181 static int smu_get_power_profile_mode(void *handle, char *buf)
3182 {
3183 struct smu_context *smu = handle;
3184
3185 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3186 !smu->ppt_funcs->get_power_profile_mode)
3187 return -EOPNOTSUPP;
3188 if (!buf)
3189 return -EINVAL;
3190
3191 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3192 }
3193
3194 static int smu_set_power_profile_mode(void *handle,
3195 long *param,
3196 uint32_t param_size)
3197 {
3198 struct smu_context *smu = handle;
3199 bool custom = false;
3200 int ret = 0;
3201
3202 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3203 !smu->ppt_funcs->set_power_profile_mode)
3204 return -EOPNOTSUPP;
3205
3206 if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
3207 custom = true;
3208	 /* clear frontend mask so custom changes propagate */
3209 smu->workload_mask = 0;
3210 }
3211
3212 if ((param[param_size] != smu->power_profile_mode) || custom) {
3213 /* clear the old user preference */
3214 smu_power_profile_mode_put(smu, smu->power_profile_mode);
3215 /* set the new user preference */
3216 smu_power_profile_mode_get(smu, param[param_size]);
3217 ret = smu_bump_power_profile_mode(smu,
3218 custom ? param : NULL,
3219 custom ? param_size : 0);
3220 if (ret)
3221 smu_power_profile_mode_put(smu, param[param_size]);
3222 else
3223 /* store the user's preference */
3224 smu->power_profile_mode = param[param_size];
3225 }
3226
3227 return ret;
3228 }
3229
3230 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3231 {
3232 struct smu_context *smu = handle;
3233
3234 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3235 return -EOPNOTSUPP;
3236
3237 if (!smu->ppt_funcs->get_fan_control_mode)
3238 return -EOPNOTSUPP;
3239
3240 if (!fan_mode)
3241 return -EINVAL;
3242
3243 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3244
3245 return 0;
3246 }
3247
3248 static int smu_set_fan_control_mode(void *handle, u32 value)
3249 {
3250 struct smu_context *smu = handle;
3251 int ret = 0;
3252
3253 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3254 return -EOPNOTSUPP;
3255
3256 if (!smu->ppt_funcs->set_fan_control_mode)
3257 return -EOPNOTSUPP;
3258
3259 if (value == U32_MAX)
3260 return -EINVAL;
3261
3262 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3263 if (ret)
3264 goto out;
3265
3266 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3267 smu->user_dpm_profile.fan_mode = value;
3268
3269 /* reset user dpm fan speed */
3270 if (value != AMD_FAN_CTRL_MANUAL) {
3271 smu->user_dpm_profile.fan_speed_pwm = 0;
3272 smu->user_dpm_profile.fan_speed_rpm = 0;
3273 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3274 }
3275 }
3276
3277 out:
3278 return ret;
3279 }
3280
3281 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3282 {
3283 struct smu_context *smu = handle;
3284 int ret = 0;
3285
3286 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3287 return -EOPNOTSUPP;
3288
3289 if (!smu->ppt_funcs->get_fan_speed_pwm)
3290 return -EOPNOTSUPP;
3291
3292 if (!speed)
3293 return -EINVAL;
3294
3295 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3296
3297 return ret;
3298 }
3299
3300 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3301 {
3302 struct smu_context *smu = handle;
3303 int ret = 0;
3304
3305 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3306 return -EOPNOTSUPP;
3307
3308 if (!smu->ppt_funcs->set_fan_speed_pwm)
3309 return -EOPNOTSUPP;
3310
3311 if (speed == U32_MAX)
3312 return -EINVAL;
3313
3314 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3315 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3316 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3317 smu->user_dpm_profile.fan_speed_pwm = speed;
3318
3319 /* Override custom RPM setting as they cannot co-exist */
3320 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3321 smu->user_dpm_profile.fan_speed_rpm = 0;
3322 }
3323
3324 return ret;
3325 }
3326
3327 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3328 {
3329 struct smu_context *smu = handle;
3330 int ret = 0;
3331
3332 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3333 return -EOPNOTSUPP;
3334
3335 if (!smu->ppt_funcs->get_fan_speed_rpm)
3336 return -EOPNOTSUPP;
3337
3338 if (!speed)
3339 return -EINVAL;
3340
3341 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3342
3343 return ret;
3344 }
3345
3346 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3347 {
3348 struct smu_context *smu = handle;
3349
3350 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3351 return -EOPNOTSUPP;
3352
3353 return smu_set_min_dcef_deep_sleep(smu, clk);
3354 }
3355
3356 static int smu_get_clock_by_type_with_latency(void *handle,
3357 enum amd_pp_clock_type type,
3358 struct pp_clock_levels_with_latency *clocks)
3359 {
3360 struct smu_context *smu = handle;
3361 enum smu_clk_type clk_type;
3362 int ret = 0;
3363
3364 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3365 return -EOPNOTSUPP;
3366
3367 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3368 switch (type) {
3369 case amd_pp_sys_clock:
3370 clk_type = SMU_GFXCLK;
3371 break;
3372 case amd_pp_mem_clock:
3373 clk_type = SMU_MCLK;
3374 break;
3375 case amd_pp_dcef_clock:
3376 clk_type = SMU_DCEFCLK;
3377 break;
3378 case amd_pp_disp_clock:
3379 clk_type = SMU_DISPCLK;
3380 break;
3381 default:
3382 dev_err(smu->adev->dev, "Invalid clock type!\n");
3383 return -EINVAL;
3384 }
3385
3386 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3387 }
3388
3389 return ret;
3390 }
3391
3392 static int smu_display_clock_voltage_request(void *handle,
3393 struct pp_display_clock_request *clock_req)
3394 {
3395 struct smu_context *smu = handle;
3396 int ret = 0;
3397
3398 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3399 return -EOPNOTSUPP;
3400
3401 if (smu->ppt_funcs->display_clock_voltage_request)
3402 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3403
3404 return ret;
3405 }
3406
3407
3408 static int smu_display_disable_memory_clock_switch(void *handle,
3409 bool disable_memory_clock_switch)
3410 {
3411 struct smu_context *smu = handle;
3412 int ret = -EINVAL;
3413
3414 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3415 return -EOPNOTSUPP;
3416
3417 if (smu->ppt_funcs->display_disable_memory_clock_switch)
3418 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3419
3420 return ret;
3421 }
3422
3423 static int smu_set_xgmi_pstate(void *handle,
3424 uint32_t pstate)
3425 {
3426 struct smu_context *smu = handle;
3427 int ret = 0;
3428
3429 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3430 return -EOPNOTSUPP;
3431
3432 if (smu->ppt_funcs->set_xgmi_pstate)
3433 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3434
3435 if (ret)
3436 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3437
3438 return ret;
3439 }
3440
3441 static int smu_get_baco_capability(void *handle)
3442 {
3443 struct smu_context *smu = handle;
3444
3445 if (!smu->pm_enabled)
3446 return false;
3447
3448 if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3449 return false;
3450
3451 return smu->ppt_funcs->get_bamaco_support(smu);
3452 }
3453
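/* BACO state control: state 1 enters BACO, state 0 exits it. */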
3454 static int smu_baco_set_state(void *handle, int state)
3455 {
3456 struct smu_context *smu = handle;
3457 int ret = 0;
3458
3459 if (!smu->pm_enabled)
3460 return -EOPNOTSUPP;
3461
3462 if (state == 0) {
3463 if (smu->ppt_funcs->baco_exit)
3464 ret = smu->ppt_funcs->baco_exit(smu);
3465 } else if (state == 1) {
3466 if (smu->ppt_funcs->baco_enter)
3467 ret = smu->ppt_funcs->baco_enter(smu);
3468 } else {
3469 return -EINVAL;
3470 }
3471
3472 if (ret)
3473	 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3474	 state ? "enter" : "exit");
3475
3476 return ret;
3477 }
3478
3479 bool smu_mode1_reset_is_support(struct smu_context *smu)
3480 {
3481 bool ret = false;
3482
3483 if (!smu->pm_enabled)
3484 return false;
3485
3486 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3487 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3488
3489 return ret;
3490 }
3491
3492 bool smu_link_reset_is_support(struct smu_context *smu)
3493 {
3494 bool ret = false;
3495
3496 if (!smu->pm_enabled)
3497 return false;
3498
3499 if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
3500 ret = smu->ppt_funcs->link_reset_is_support(smu);
3501
3502 return ret;
3503 }
3504
3505 int smu_mode1_reset(struct smu_context *smu)
3506 {
3507 int ret = 0;
3508
3509 if (!smu->pm_enabled)
3510 return -EOPNOTSUPP;
3511
3512 if (smu->ppt_funcs->mode1_reset)
3513 ret = smu->ppt_funcs->mode1_reset(smu);
3514
3515 return ret;
3516 }
3517
3518 static int smu_mode2_reset(void *handle)
3519 {
3520 struct smu_context *smu = handle;
3521 int ret = 0;
3522
3523 if (!smu->pm_enabled)
3524 return -EOPNOTSUPP;
3525
3526 if (smu->ppt_funcs->mode2_reset)
3527 ret = smu->ppt_funcs->mode2_reset(smu);
3528
3529 if (ret)
3530 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3531
3532 return ret;
3533 }
3534
3535 int smu_link_reset(struct smu_context *smu)
3536 {
3537 int ret = 0;
3538
3539 if (!smu->pm_enabled)
3540 return -EOPNOTSUPP;
3541
3542 if (smu->ppt_funcs->link_reset)
3543 ret = smu->ppt_funcs->link_reset(smu);
3544
3545 return ret;
3546 }
3547
3548 static int smu_enable_gfx_features(void *handle)
3549 {
3550 struct smu_context *smu = handle;
3551 int ret = 0;
3552
3553 if (!smu->pm_enabled)
3554 return -EOPNOTSUPP;
3555
3556 if (smu->ppt_funcs->enable_gfx_features)
3557 ret = smu->ppt_funcs->enable_gfx_features(smu);
3558
3559 if (ret)
3560	 dev_err(smu->adev->dev, "Failed to enable gfx features!\n");
3561
3562 return ret;
3563 }
3564
3565 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3566 struct pp_smu_nv_clock_table *max_clocks)
3567 {
3568 struct smu_context *smu = handle;
3569 int ret = 0;
3570
3571 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3572 return -EOPNOTSUPP;
3573
3574 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3575 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3576
3577 return ret;
3578 }
3579
3580 static int smu_get_uclk_dpm_states(void *handle,
3581 unsigned int *clock_values_in_khz,
3582 unsigned int *num_states)
3583 {
3584 struct smu_context *smu = handle;
3585 int ret = 0;
3586
3587 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3588 return -EOPNOTSUPP;
3589
3590 if (smu->ppt_funcs->get_uclk_dpm_states)
3591 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3592
3593 return ret;
3594 }
3595
3596 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3597 {
3598 struct smu_context *smu = handle;
3599 enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3600
3601 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3602 return -EOPNOTSUPP;
3603
3604 if (smu->ppt_funcs->get_current_power_state)
3605 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3606
3607 return pm_state;
3608 }
3609
3610 static int smu_get_dpm_clock_table(void *handle,
3611 struct dpm_clocks *clock_table)
3612 {
3613 struct smu_context *smu = handle;
3614 int ret = 0;
3615
3616 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3617 return -EOPNOTSUPP;
3618
3619 if (smu->ppt_funcs->get_dpm_clock_table)
3620 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3621
3622 return ret;
3623 }
3624
3625 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3626 {
3627 struct smu_context *smu = handle;
3628
3629 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3630 return -EOPNOTSUPP;
3631
3632 if (!smu->ppt_funcs->get_gpu_metrics)
3633 return -EOPNOTSUPP;
3634
3635 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3636 }
3637
3638 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3639 size_t size)
3640 {
3641 struct smu_context *smu = handle;
3642
3643 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3644 return -EOPNOTSUPP;
3645
3646 if (!smu->ppt_funcs->get_pm_metrics)
3647 return -EOPNOTSUPP;
3648
3649 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3650 }
3651
3652 static int smu_enable_mgpu_fan_boost(void *handle)
3653 {
3654 struct smu_context *smu = handle;
3655 int ret = 0;
3656
3657 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3658 return -EOPNOTSUPP;
3659
3660 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3661 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3662
3663 return ret;
3664 }
3665
3666 static int smu_gfx_state_change_set(void *handle,
3667 uint32_t state)
3668 {
3669 struct smu_context *smu = handle;
3670 int ret = 0;
3671
3672 if (smu->ppt_funcs->gfx_state_change_set)
3673 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3674
3675 return ret;
3676 }
3677
3678 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3679 {
3680 int ret = 0;
3681
3682 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3683 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3684
3685 return ret;
3686 }
3687
3688 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3689 {
3690 int ret = -EOPNOTSUPP;
3691
3692 if (smu->ppt_funcs &&
3693 smu->ppt_funcs->get_ecc_info)
3694 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3695
3696 return ret;
3697
3698 }
3699
3700 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3701 {
3702 struct smu_context *smu = handle;
3703 struct smu_table_context *smu_table = &smu->smu_table;
3704 struct smu_table *memory_pool = &smu_table->memory_pool;
3705
3706 if (!addr || !size)
3707 return -EINVAL;
3708
3709 *addr = NULL;
3710 *size = 0;
3711 if (memory_pool->bo) {
3712 *addr = memory_pool->cpu_addr;
3713 *size = memory_pool->size;
3714 }
3715
3716 return 0;
3717 }
3718
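/* Emit every supported level of a DPM policy; the active level is marked with '*'. */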
3719 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
3720 size_t *size)
3721 {
3722 size_t offset = *size;
3723 int level;
3724
3725 for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
3726 if (level == policy->current_level)
3727 offset += sysfs_emit_at(sysbuf, offset,
3728 "%d : %s*\n", level,
3729 policy->desc->get_desc(policy, level));
3730 else
3731 offset += sysfs_emit_at(sysbuf, offset,
3732 "%d : %s\n", level,
3733 policy->desc->get_desc(policy, level));
3734 }
3735
3736 *size = offset;
3737 }
3738
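/**
 * smu_get_pm_policy_info - Emit the levels of a PM policy into a sysfs buffer
 * @smu: smu_context pointer
 * @p_type: power management policy type
 * @sysbuf: sysfs buffer to fill
 *
 * Prints one line per supported level of the requested policy, marking the
 * currently selected level with a '*'.
 * Returns the number of bytes written, -EOPNOTSUPP if DPM or policies are
 * not available, -EINVAL for an invalid policy type or buffer, or -ENOENT
 * if no matching policy is found.
 */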
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}

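/**
 * smu_get_pm_policy - Look up a PM policy by type
 * @smu: smu_context pointer
 * @p_type: power management policy type
 *
 * Returns a pointer to the matching policy, or NULL if policies are not
 * supported or no policy of the requested type exists.
 */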
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int i;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!policy_ctxt)
		return NULL;

	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
		if (policy_ctxt->policies[i].policy_type == p_type)
			return &policy_ctxt->policies[i];
	}

	return NULL;
}

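/**
 * smu_set_pm_policy - Set the active level of a PM policy
 * @smu: smu_context pointer
 * @p_type: power management policy type
 * @level: policy level to apply
 *
 * Returns 0 if the level was applied or is already active, -EINVAL for an
 * out-of-range level, -EOPNOTSUPP if the policy cannot be changed, or the
 * error code reported by the set_policy callback.
 */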
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy *dpm_policy = NULL;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int ret = -EOPNOTSUPP;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return ret;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);

	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
		return ret;

	if (dpm_policy->current_level == level)
		return 0;

	ret = dpm_policy->set_policy(smu, level);

	if (!ret)
		dpm_policy->current_level = level;

	return ret;
}

static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	.pause_power_profile = smu_pause_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
	.get_xcp_metrics = smu_sys_get_xcp_metrics,
};

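/**
 * smu_wait_for_event - Wait for an SMU event to be signalled
 * @smu: smu_context pointer
 * @event: event to wait for
 * @event_arg: event-specific argument
 *
 * Returns the result of the ASIC-specific wait_for_event callback, or
 * -EINVAL if the callback is not implemented.
 */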
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

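/**
 * smu_stb_collect_info - Copy the STB (smart trace buffer) contents
 * @smu: smu_context pointer
 * @buf: destination buffer
 * @size: size of @buf, which must match the STB buffer size
 *
 * Returns -EOPNOTSUPP if STB is disabled or not implemented, -EINVAL on a
 * size mismatch, otherwise the result of the stb_collect_info callback.
 */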
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm that the allocated buffer is of the correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to take the smu mutex here: the STB is read directly
	 * through MMIO and does not go through the SMU messaging route
	 * (for now at least). For register access, rely on the
	 * implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only a read method but also open and release
 * methods, because .read takes up to PAGE_SIZE of data on each call and
 * so is invoked multiple times. We allocate the STB buffer in .open and
 * free it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

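/**
 * amdgpu_smu_stb_debug_fs_init - Create the STB dump debugfs entry
 * @adev: amdgpu_device pointer
 *
 * Registers the amdgpu_smu_stb_dump debugfs file when STB is supported and
 * CONFIG_DEBUG_FS is enabled; does nothing otherwise.
 */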
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

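/**
 * smu_send_hbm_bad_pages_num - Report the number of bad HBM pages to the SMU
 * @smu: smu_context pointer
 * @size: number of bad pages to report
 *
 * Returns 0 when no callback is implemented or the report succeeds,
 * otherwise the callback's error code.
 */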
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

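/**
 * smu_send_hbm_bad_channel_flag - Send the bad HBM channel flag to the SMU
 * @smu: smu_context pointer
 * @size: flag value to send
 *
 * Returns 0 when no callback is implemented or on success, otherwise the
 * error reported by the callback.
 */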
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

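/**
 * smu_send_rma_reason - Notify the SMU of an RMA reason
 * @smu: smu_context pointer
 *
 * Returns 0 when unsupported or on success, otherwise the callback's
 * error code.
 */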
int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}

/**
 * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
 * @smu: smu_context pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns true if supported, false otherwise.
 */
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
	bool ret = false;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
		ret = smu->ppt_funcs->reset_sdma_is_supported(smu);

	return ret;
}

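/**
 * smu_reset_sdma - Reset SDMA engine instances through the SMU
 * @smu: smu_context pointer
 * @inst_mask: bitmask of SDMA instances to reset
 *
 * Returns 0 when the callback is absent or the reset succeeds, otherwise
 * the callback's error code.
 */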
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
		ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);

	return ret;
}

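/**
 * smu_reset_vcn - Reset VCN instances through the SMU
 * @smu: smu_context pointer
 * @inst_mask: bitmask of VCN instances to reset
 *
 * Invokes the dpm_reset_vcn callback when one is implemented.
 * Always returns 0.
 */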
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
	if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
		smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);

	return 0;
}