/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}
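
/*
 * Editorial usage sketch (hedged; exact node path and mask format may
 * vary per system): the two helpers above back the amdgpu "pp_features"
 * sysfs interface, so the supported feature mask can typically be
 * inspected and overridden from userspace with something like:
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fffffff > /sys/class/drm/card0/device/pp_features
 */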

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}
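
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * the gfxoff accessors above are exported to the rest of amdgpu, and a
 * debug-style consumer could combine them as follows.
 *
 *	uint32_t status;
 *	u32 residency;
 *	u64 count;
 *
 *	if (!smu_get_status_gfxoff(smu, &status) &&
 *	    !smu_get_residency_gfxoff(smu, &residency) &&
 *	    !smu_get_entrycount_gfxoff(smu, &count))
 *		dev_dbg(smu->adev->dev,
 *			"gfxoff: state %u, residency %u, entries %llu\n",
 *			status, residency, count);
 */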

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}
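
/*
 * Illustrative sketch only (assumed caller, frequencies in MHz): query
 * the ultimate GFXCLK range and then soft-limit the upper bound, the
 * way clamping paths elsewhere in the driver combine these two helpers.
 *
 *	uint32_t min_freq, max_freq;
 *	int ret;
 *
 *	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq);
 *	if (!ret)
 *		ret = smu_set_soft_freq_range(smu, SMU_GFXCLK,
 *					      min_freq, max_freq / 2);
 */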

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}
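
/*
 * Editorial note on units (added for clarity): smu_get_dpm_freq_range()
 * reports clocks in MHz, while the legacy powerplay get_mclk/get_sclk
 * callbacks are expected to return values in 10 kHz units, hence the
 * "* 100" above (e.g. 1000 MHz -> 100000).
 */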

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
		    !adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * Don't power on VCN/JPEG when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	/* vcn_gated is the inverse of "enabled"; skip no-op transitions */
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				       bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}
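
/*
 * Editorial note on the pattern shared by the four helpers above: each
 * *_gated atomic stores the inverse of "enabled", so the XOR check
 * "gated ^ enable" is non-zero exactly when the block is already in the
 * requested state, turning repeated enable/disable requests into no-ops.
 * The atomic is only updated after the ppt callback succeeds, so a
 * failed transition leaves the bookkeeping untouched. For instance, with
 * vcn_gated == 1 (powered down), a request with enable == true gives
 * 1 ^ 1 == 0 and the power-up proceeds; a second identical request then
 * sees 0 ^ 1 == 1 and returns early.
 */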

static int smu_set_mall_enable(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->ppt_funcs->set_mall_enable)
		return 0;

	ret = smu->ppt_funcs->set_mall_enable(smu);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    and the callers guarantee it is free of race conditions.
 * 2. Or it is called on a user request to set power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced by
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both types are handled here.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}
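
/*
 * Hedged usage sketch (assumed call chain): IP blocks typically reach
 * smu_dpm_set_power_gate() through the amd_pm_funcs table rather than
 * directly, e.g. a VCN begin-use path conceptually does:
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN,
 *					  false);
 *
 * which ungates VCN (gate == false -> smu_dpm_set_vcn_enable(smu, true)).
 */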

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}
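
/*
 * Added example of the dependency bookkeeping above: after a user forces
 * SMU_MCLK, clk_dependency == BIT(SMU_FCLK) | BIT(SMU_SOCCLK), marking
 * FCLK and SOCCLK as derived from the MCLK setting. The restore path
 * below then skips any clock whose bit is set in clk_dependency, so only
 * the dominating clock is re-forced.
 */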

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk types and force the saved user clk
			 * configs; skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
							      smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; expose a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * A special hw_fini action (for Navi1x, skipping the DPM
	 * disablement) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}
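
/*
 * Hedged usage sketch: smu_sys_get_pp_table()/smu_sys_set_pp_table()
 * back the amdgpu "pp_table" sysfs node, so a custom soft powerplay
 * table can typically be saved and uploaded from userspace with e.g.
 * (device path may vary):
 *
 *	cat /sys/class/drm/card0/device/pp_table > saved_pp_table.bin
 *	cat modified_pp_table.bin > /sys/class/drm/card0/device/pp_table
 *
 * The upload triggers the smu_reset() sequence above so the new table
 * takes effect.
 */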

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, setting the allowed feature masks (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		smu_v14_0_2_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->smu_baco.maco_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	/*
	 * Save the current gate states, then power VCN/JPEG up so the
	 * default DPM table setup can safely query their clock domains;
	 * the previous states are restored afterwards.
	 */
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		vcn_gate = atomic_read(&power_gate->vcn_gated);
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		ret = smu_dpm_set_vcn_enable(smu, true);
		if (ret)
			return ret;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		smu_dpm_set_vcn_enable(smu, !vcn_gate);

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * PMFW may have booted the ASIC with a different mode.
	 * For ASICs supporting the ACDC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool will be used for SMC use and the msgs
 * SetSystemVirtualDramAddr and DramLogSetDramAddr can notify the SMC
 * of its location.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create the smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create the smu_power_context structure, and allocate the
	 * smu_dpm_context and context-size data backing smu_power_context.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed to be below the SW CTF
	 * set point after the enforced delay, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *policy;

	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW puts PLPD into the default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
	} else {
		policy_ctxt = dpm_ctxt->dpm_policies;
		if (policy_ctxt)
			policy_ctxt->policy_mask &=
				~BIT(PP_PM_POLICY_XGMI_PLPD);
	}
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to clean those up to eliminate the holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
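	/*
	 * Added walk-through of the compaction below: a hole at index i is
	 * backfilled with the last valid entry (which is then zeroed and
	 * num_of_wbrf_ranges shrunk), and any later entry identical to the
	 * current one is zeroed. E.g. for
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500)}: i == 0 zeroes
	 * the duplicate at index 3; i == 1 is a hole and pulls (6882, 6962)
	 * forward from index 2, leaving
	 * {(2400, 2500), (6882, 6962), (0, 0), (0, 0)}.
	 */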
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls the relevant amdgpu function in response to a wbrf event
 * notification from the kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * The notification flood is over and the driver consumes the latest
 * exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies through the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
			      acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if the wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifiband exclusion ranges may already be in place
	 * before our driver loaded. Schedule a worker to make sure
	 * the driver is aware of those exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set the PMSTATUSLOG table bo address with the SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to
	 * notify the SMC of the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed that the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy the pptable bo in vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow when wbrf is supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (from vbios) into the dpm tables context,
	 * e.g. gfxclk, memclk, dcefclk, etc. And enable the DPM feature for
	 * each type of clk.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
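	/*
	 * Added worked example of the encoding above (the actual message
	 * packing is handled by the ASIC-specific
	 * smu_update_pcie_parameters() implementation): a Gen4-capable
	 * x16 link yields pcie_gen = 3 and pcie_width = 6.
	 */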
1698 if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1699 pcie_width = 6;
1700 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1701 pcie_width = 5;
1702 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1703 pcie_width = 4;
1704 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1705 pcie_width = 3;
1706 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1707 pcie_width = 2;
1708 else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1709 pcie_width = 1;
1710 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1711 if (ret) {
1712 dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1713 return ret;
1714 }
1715
1716 ret = smu_get_thermal_temperature_range(smu);
1717 if (ret) {
1718 dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1719 return ret;
1720 }
1721
1722 ret = smu_enable_thermal_alert(smu);
1723 if (ret) {
1724 dev_err(adev->dev, "Failed to enable thermal alert!\n");
1725 return ret;
1726 }
1727
1728 ret = smu_notify_display_change(smu);
1729 if (ret) {
1730 dev_err(adev->dev, "Failed to notify display change!\n");
1731 return ret;
1732 }
1733
1734 /*
1735 * Set min deep sleep dce fclk with bootup value from vbios via
1736 * SetMinDeepSleepDcefclk MSG.
1737 */
1738 ret = smu_set_min_dcef_deep_sleep(smu,
1739 smu->smu_table.boot_values.dcefclk / 100);
1740 if (ret) {
1741 dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1742 return ret;
1743 }
1744
1745 /* Init wbrf support. Properly setup the notifier */
1746 ret = smu_wbrf_init(smu);
1747 if (ret)
1748 dev_err(adev->dev, "Error during wbrf init call\n");
1749
1750 return ret;
1751 }
1752
smu_start_smc_engine(struct smu_context * smu)1753 static int smu_start_smc_engine(struct smu_context *smu)
1754 {
1755 struct amdgpu_device *adev = smu->adev;
1756 int ret = 0;
1757
1758 smu->smc_fw_state = SMU_FW_INIT;
1759
1760 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1761 if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1762 if (smu->ppt_funcs->load_microcode) {
1763 ret = smu->ppt_funcs->load_microcode(smu);
1764 if (ret)
1765 return ret;
1766 }
1767 }
1768 }
1769
1770 if (smu->ppt_funcs->check_fw_status) {
1771 ret = smu->ppt_funcs->check_fw_status(smu);
1772 if (ret) {
1773 dev_err(adev->dev, "SMC is not ready\n");
1774 return ret;
1775 }
1776 }
1777
1778 /*
1779 * Send msg GetDriverIfVersion to check if the return value is equal
1780 * with DRIVER_IF_VERSION of smc header.
1781 */
1782 ret = smu_check_fw_version(smu);
1783 if (ret)
1784 return ret;
1785
1786 return ret;
1787 }
1788
smu_hw_init(void * handle)1789 static int smu_hw_init(void *handle)
1790 {
1791 int ret;
1792 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1793 struct smu_context *smu = adev->powerplay.pp_handle;
1794
1795 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1796 smu->pm_enabled = false;
1797 return 0;
1798 }
1799
1800 ret = smu_start_smc_engine(smu);
1801 if (ret) {
1802 dev_err(adev->dev, "SMC engine is not correctly up!\n");
1803 return ret;
1804 }
1805
1806 /*
1807 * Check whether wbrf is supported. This needs to be done
1808 * before SMU setup starts since part of SMU configuration
1809 * relies on this.
1810 */
1811 smu_wbrf_support_check(smu);
1812
1813 if (smu->is_apu) {
1814 ret = smu_set_gfx_imu_enable(smu);
1815 if (ret)
1816 return ret;
1817 smu_dpm_set_vcn_enable(smu, true);
1818 smu_dpm_set_jpeg_enable(smu, true);
1819 smu_dpm_set_vpe_enable(smu, true);
1820 smu_dpm_set_umsch_mm_enable(smu, true);
1821 smu_set_mall_enable(smu);
1822 smu_set_gfx_cgpg(smu, true);
1823 }
1824
1825 if (!smu->pm_enabled)
1826 return 0;
1827
1828 ret = smu_get_driver_allowed_feature_mask(smu);
1829 if (ret)
1830 return ret;
1831
1832 ret = smu_smc_hw_setup(smu);
1833 if (ret) {
1834 dev_err(adev->dev, "Failed to setup smc hw!\n");
1835 return ret;
1836 }
1837
	/*
	 * Move maximum sustainable clock retrieval here considering
	 * 1. It is not needed on resume (from S3).
	 * 2. DAL settings come between .hw_init and .late_init of SMU.
	 *    And DAL needs to know the maximum sustainable clocks. Thus
	 *    it cannot be put in .late_init().
	 */
	ret = smu_init_max_sustainable_clocks(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
		return ret;
	}

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is initialized successfully!\n");

	return 0;
}

static int smu_disable_dpms(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
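	/*
	 * Tear down through BACO only on dGPUs, and only for a BACO-based
	 * ASIC reset or for runtime PM/hibernation on a BACO-capable part.
	 */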
	bool use_baco = !smu->is_apu &&
		((amdgpu_in_reset(adev) &&
		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));

	/*
	 * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features
	 * (disablement or others) properly on suspend/reset/unload.
	 * Driver involvement may cause some unexpected issues.
	 */
	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		return 0;
	default:
		break;
	}

	/*
	 * For custom pptable uploading, skip the DPM features
	 * disable process on Navi1x ASICs.
	 *   - As the gfx related features are under control of
	 *     RLC on those ASICs. RLC reinitialization will be
	 *     needed to reenable them. That will cost much more
	 *     effort.
	 *
	 *   - SMU firmware can handle the DPM reenablement
	 *     properly.
	 */
	if (smu->uploading_custom_pp_table) {
		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 5, 0):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For Sienna_Cichlid, PMFW will handle the feature disablement
	 * properly on BACO entry. Driver involvement is unnecessary.
	 */
	if (use_baco) {
		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 5):
		case IP_VERSION(11, 0, 9):
		case IP_VERSION(13, 0, 7):
			return 0;
		default:
			break;
		}
	}

	/*
	 * For GFX11 and subsequent APUs, PMFW will handle the feature
	 * disablement properly for gpu reset and S0i3 cases. Driver
	 * involvement is unnecessary.
	 */
	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
		return 0;

	/*
	 * For gpu reset, runpm and hibernation through BACO,
	 * BACO feature has to be kept enabled.
	 */
	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
		ret = smu_disable_all_features_with_exception(smu,
							      SMU_FEATURE_BACO_BIT);
		if (ret)
			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
	} else {
		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
		if (!adev->scpm_enabled) {
			ret = smu_system_features_control(smu, false);
			if (ret)
				dev_err(adev->dev, "Failed to disable smu features.\n");
		}
	}

	/*
	 * Notify the SMU that the RLC is going to be off and stop RLC/SMU
	 * interaction; otherwise the SMU will hang while interacting with
	 * a halted RLC. This is a workaround for the Vangogh SMU hang issue.
	 */
	ret = smu_notify_rlc_state(smu, false);
	if (ret) {
		dev_err(adev->dev, "Failed to notify rlc status!\n");
		return ret;
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
	    !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);

	return ret;
}

static int smu_smc_hw_cleanup(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	smu_wbrf_fini(smu);

	cancel_work_sync(&smu->throttling_logging_work);
	cancel_work_sync(&smu->interrupt_work);

	ret = smu_disable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
		return ret;
	}

	cancel_delayed_work_sync(&smu->swctf_delayed_work);

	ret = smu_disable_dpms(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
		return ret;
	}

	return 0;
}

static int smu_reset_mp1_state(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if ((!adev->in_runpm) && (!adev->in_suspend) &&
	    (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
	    IP_VERSION(13, 0, 10) &&
	    !amdgpu_device_has_display_hardware(adev))
		ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);

	return ret;
}

static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	smu_dpm_set_vcn_enable(smu, false);
	smu_dpm_set_jpeg_enable(smu, false);
	smu_dpm_set_vpe_enable(smu, false);
	smu_dpm_set_umsch_mm_enable(smu, false);

	adev->vcn.cur_state = AMD_PG_STATE_GATE;
	adev->jpeg.cur_state = AMD_PG_STATE_GATE;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	ret = smu_reset_mp1_state(smu);
	if (ret)
		return ret;

	return 0;
}

static void smu_late_fini(void *handle)
{
	struct amdgpu_device *adev = handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	kfree(smu);
}

static int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

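	/*
	 * An SMU reset is a full teardown/bringup cycle: hw_fini,
	 * then hw_init, then late_init.
	 */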
	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	ret = smu_late_init(adev);
	if (ret)
		return ret;

	return 0;
}

static int smu_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;
	uint64_t count;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	adev->pm.dpm_enabled = false;

	ret = smu_smc_hw_cleanup(smu);
	if (ret)
		return ret;

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	smu_set_gfx_cgpg(smu, false);

	/*
	 * PMFW resets the gfxoff entry count when the device is suspended,
	 * so save the last value here to be used on resume and keep the
	 * count consistent.
	 */
	ret = smu_get_entrycount_gfxoff(smu, &count);
	if (!ret)
		adev->gfx.gfx_off_entrycount = count;

	return 0;
}

static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	dev_info(adev->dev, "SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		dev_err(adev->dev, "SMC engine is not correctly up!\n");
		return ret;
	}

	ret = smu_smc_hw_setup(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup smc hw!\n");
		return ret;
	}

	ret = smu_set_gfx_imu_enable(smu);
	if (ret)
		return ret;

	smu_set_gfx_cgpg(smu, true);

	smu->disable_uclk_switch = 0;

	adev->pm.dpm_enabled = true;

	dev_info(adev->dev, "SMU is resumed successfully!\n");

	return 0;
}

static int smu_display_configuration_change(void *handle,
					    const struct amd_pp_display_configuration *display_config)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!display_config)
		return -EINVAL;

	smu_set_min_dcef_deep_sleep(smu,
				    display_config->min_dcef_deep_sleep_set_clk / 100);

	return 0;
}

static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}

static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
				     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context *)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

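	/*
	 * UMD pstate entry disables GPO, GFX ULV and deep sleep before
	 * pinning the stable pstate; exit re-enables them in the reverse
	 * order after unpinning.
	 */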
	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_gpo_control(smu, false);
			smu_gfx_ulv_control(smu, false);
			smu_deep_sleep_control(smu, false);
			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
			smu_deep_sleep_control(smu, true);
			smu_gfx_ulv_control(smu, true);
			smu_gpo_control(smu, true);
		}
	}

	return 0;
}

static int smu_bump_power_profile_mode(struct smu_context *smu,
				       long *param,
				       uint32_t param_size)
{
	int ret = 0;

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	return ret;
}

static int smu_adjust_power_state_dynamic(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  bool skip_display_settings,
					  bool force_update)
{
	int ret = 0;
	int index = 0;
	long workload[1];
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!\n");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!\n");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!\n");
			return ret;
		}
	}

	if (force_update || smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!\n");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

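	/*
	 * Outside of manual/deterministic modes, apply the highest-priority
	 * active workload: fls() returns the topmost set bit of the
	 * workload mask, which indexes the stored workload settings.
	 */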
	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];

		if (force_update || smu->power_profile_mode != workload[0])
			smu_bump_power_profile_mode(smu, workload, 0);
	}

	return ret;
}

static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			return ret;
		ret = smu_adjust_power_state_dynamic(smu, level, false, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
		ret = smu_adjust_power_state_dynamic(smu, level, true, true);
		break;
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true, false);
		break;
	default:
		break;
	}

	return ret;
}

static int smu_handle_dpm_task(void *handle,
			       enum amd_pp_task task_id,
			       enum amd_pm_state_type *user_state)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}

static int smu_switch_power_profile(void *handle,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload[1];
	uint32_t index;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
		return -EINVAL;

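	/*
	 * The workload mask tracks every profile currently requested,
	 * ordered by priority; dropping or adding a request re-selects
	 * the highest-priority remaining profile via fls().
	 */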
	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload[0] = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
		smu_bump_power_profile_mode(smu, workload, 0);

	return 0;
}

static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	return smu_dpm_ctx->dpm_level;
}

static int smu_force_performance_level(void *handle,
				       enum amd_dpm_forced_level level)
{
	struct smu_context *smu = handle;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret)
		return ret;

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE);

	/* reset user dpm clock state */
	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
		smu->user_dpm_profile.clk_dependency = 0;
	}

	return ret;
}

static int smu_set_display_count(void *handle, uint32_t count)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_init_display_count(smu, count);
}

static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

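	/*
	 * Cache the user-requested mask so it can be restored later,
	 * unless we are already replaying stored user settings.
	 */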
	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
			smu->user_dpm_profile.clk_mask[clk_type] = mask;
			smu_set_user_clk_dependencies(smu, clk_type);
		}
	}

	return ret;
}

static int smu_force_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  uint32_t mask)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_VCLK1:
		clk_type = SMU_VCLK1; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case PP_DCLK1:
		clk_type = SMU_DCLK1; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	default:
		return -EINVAL;
	}

	return smu_force_smuclk_levels(smu, clk_type, mask);
}

/*
 * On system suspend or reset, the dpm_enabled flag is cleared so
 * that unsupported SMU services are gated. However, the mp1 state
 * setting should still be granted even when dpm_enabled is cleared.
 */
static int smu_set_mp1_state(void *handle,
			     enum pp_mp1_state mp1_state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->set_mp1_state)
		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);

	return ret;
}

static int smu_set_df_cstate(void *handle,
			     enum pp_df_cstate state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");

	return ret;
}

int smu_write_watermarks_table(struct smu_context *smu)
{
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_watermarks_table(smu, NULL);
}

static int smu_set_watermarks_for_clock_ranges(void *handle,
					       struct pp_smu_wm_range_sets *clock_ranges)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->disable_watermark)
		return 0;

	return smu_set_watermarks_table(smu, clock_ranges);
}

int smu_set_ac_dc(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	/* controlled by firmware */
	if (smu->dc_controlled_by_gpio)
		return 0;

	ret = smu_set_power_source(smu,
				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
				   SMU_POWER_SOURCE_DC);
	if (ret)
		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
			smu->adev->pm.ac_power ? "AC" : "DC");

	return ret;
}

const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.late_fini = smu_late_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 13,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 14,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

static int smu_load_microcode(void *handle)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	/* This should be used for non-PSP loading */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
		return 0;

	if (smu->ppt_funcs->load_microcode) {
		ret = smu->ppt_funcs->load_microcode(smu);
		if (ret) {
			dev_err(adev->dev, "Load microcode failed\n");
			return ret;
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret) {
			dev_err(adev->dev, "SMC is not ready\n");
			return ret;
		}
	}

	return ret;
}

static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	return ret;
}

static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = speed;

		/* Override the custom PWM setting, as the two cannot coexist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = 0;
	}

	return ret;
}

/**
 * smu_get_power_limit - Request one of the SMU Power Limits
 *
 * @handle: pointer to smu context
 * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
 * @pp_power_type: &pp_power_type selecting the type of power limit
 * Return: 0 on success, <0 on error
 *
 */
int smu_get_power_limit(void *handle,
			uint32_t *limit,
			enum pp_power_limit_level pp_limit_level,
			enum pp_power_type pp_power_type)
{
	struct smu_context *smu = handle;
	struct amdgpu_device *adev = smu->adev;
	enum smu_ppt_limit_level limit_level;
	uint32_t limit_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	switch (pp_power_type) {
	case PP_PWR_TYPE_SUSTAINED:
		limit_type = SMU_DEFAULT_PPT_LIMIT;
		break;
	case PP_PWR_TYPE_FAST:
		limit_type = SMU_FAST_PPT_LIMIT;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		limit_level = SMU_PPT_LIMIT_CURRENT;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		limit_level = SMU_PPT_LIMIT_DEFAULT;
		break;
	case PP_PWR_LIMIT_MAX:
		limit_level = SMU_PPT_LIMIT_MAX;
		break;
	case PP_PWR_LIMIT_MIN:
		limit_level = SMU_PPT_LIMIT_MIN;
		break;
	default:
		return -EOPNOTSUPP;
	}

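	/*
	 * Fast PPT limits are queried from the ppt backend; sustained
	 * (default type) limits are served from the cached values, with
	 * the current limit refreshed from the ASIC on the listed parts.
	 */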
	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
		if (smu->ppt_funcs->get_ppt_limit)
			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
	} else {
		switch (limit_level) {
		case SMU_PPT_LIMIT_CURRENT:
			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
			case IP_VERSION(13, 0, 2):
			case IP_VERSION(13, 0, 6):
			case IP_VERSION(13, 0, 14):
			case IP_VERSION(11, 0, 7):
			case IP_VERSION(11, 0, 11):
			case IP_VERSION(11, 0, 12):
			case IP_VERSION(11, 0, 13):
				ret = smu_get_asic_power_limits(smu,
								&smu->current_power_limit,
								NULL, NULL, NULL);
				break;
			default:
				break;
			}
			*limit = smu->current_power_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*limit = smu->default_power_limit;
			break;
		case SMU_PPT_LIMIT_MAX:
			*limit = smu->max_power_limit;
			break;
		case SMU_PPT_LIMIT_MIN:
			*limit = smu->min_power_limit;
			break;
		default:
			return -EINVAL;
		}
	}

	return ret;
}

static int smu_set_power_limit(void *handle, uint32_t limit)
{
	struct smu_context *smu = handle;
	uint32_t limit_type = limit >> 24;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

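	/*
	 * The limit argument packs the PPT limit type in its top byte
	 * (extracted above) and the limit value in the low 24 bits.
	 */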
	limit &= (1 << 24) - 1;
	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		if (smu->ppt_funcs->set_power_limit)
			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);

	if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
		dev_err(smu->adev->dev,
			"New power limit (%d) is out of range [%d,%d]\n",
			limit, smu->min_power_limit, smu->max_power_limit);
		return -EINVAL;
	}

	if (!limit)
		limit = smu->current_power_limit;

	if (smu->ppt_funcs->set_power_limit) {
		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
			smu->user_dpm_profile.power_limit = limit;
	}

	return ret;
}

static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	return ret;
}

static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
	enum smu_clk_type clk_type;

	switch (type) {
	case PP_SCLK:
		clk_type = SMU_SCLK; break;
	case PP_MCLK:
		clk_type = SMU_MCLK; break;
	case PP_PCIE:
		clk_type = SMU_PCIE; break;
	case PP_SOCCLK:
		clk_type = SMU_SOCCLK; break;
	case PP_FCLK:
		clk_type = SMU_FCLK; break;
	case PP_DCEFCLK:
		clk_type = SMU_DCEFCLK; break;
	case PP_VCLK:
		clk_type = SMU_VCLK; break;
	case PP_VCLK1:
		clk_type = SMU_VCLK1; break;
	case PP_DCLK:
		clk_type = SMU_DCLK; break;
	case PP_DCLK1:
		clk_type = SMU_DCLK1; break;
	case OD_SCLK:
		clk_type = SMU_OD_SCLK; break;
	case OD_MCLK:
		clk_type = SMU_OD_MCLK; break;
	case OD_VDDC_CURVE:
		clk_type = SMU_OD_VDDC_CURVE; break;
	case OD_RANGE:
		clk_type = SMU_OD_RANGE; break;
	case OD_VDDGFX_OFFSET:
		clk_type = SMU_OD_VDDGFX_OFFSET; break;
	case OD_CCLK:
		clk_type = SMU_OD_CCLK; break;
	case OD_FAN_CURVE:
		clk_type = SMU_OD_FAN_CURVE; break;
	case OD_ACOUSTIC_LIMIT:
		clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
	case OD_ACOUSTIC_TARGET:
		clk_type = SMU_OD_ACOUSTIC_TARGET; break;
	case OD_FAN_TARGET_TEMPERATURE:
		clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
	case OD_FAN_MINIMUM_PWM:
		clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
	default:
		clk_type = SMU_CLK_COUNT; break;
	}

	return clk_type;
}

static int smu_print_ppclk_levels(void *handle,
				  enum pp_clock_type type,
				  char *buf)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	return smu_print_smuclk_levels(smu, clk_type, buf);
}

static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->emit_clk_levels)
		return -ENOENT;

	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
}

static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	return ret;
}

static int smu_read_sensor(void *handle,
			   int sensor,
			   void *data,
			   int *size_arg)
{
	struct smu_context *smu = handle;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	int ret = 0;
	uint32_t *size, size_val;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!data || !size_arg)
		return -EINVAL;

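	/*
	 * The ppt read_sensor backends take a uint32_t size while this
	 * interface receives an int; bridge through a local copy.
	 */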
	size_val = *size_arg;
	size = &size_val;

	if (smu->ppt_funcs->read_sensor)
		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
			goto unlock;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		*size = 0;
		ret = -EOPNOTSUPP;
		break;
	}

unlock:
	/* copy the (possibly updated) size back to the caller's int */
	*size_arg = size_val;

	return ret;
}

static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
{
	int ret = -EOPNOTSUPP;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
{
	int ret = -EOPNOTSUPP;
	struct smu_context *smu = handle;

	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);

	return ret;
}

static int smu_get_power_profile_mode(void *handle, char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}

static int smu_set_power_profile_mode(void *handle,
				      long *param,
				      uint32_t param_size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
	    !smu->ppt_funcs->set_power_profile_mode)
		return -EOPNOTSUPP;

	return smu_bump_power_profile_mode(smu, param, param_size);
}

static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_control_mode)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);

	return 0;
}

static int smu_set_fan_control_mode(void *handle, u32 value)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_control_mode)
		return -EOPNOTSUPP;

	if (value == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
	if (ret)
		goto out;

	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.fan_mode = value;

		/* reset user dpm fan speed */
		if (value != AMD_FAN_CTRL_MANUAL) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
		}
	}

out:
	return ret;
}

static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);

	return ret;
}

static int smu_set_fan_speed_pwm(void *handle, u32 speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->set_fan_speed_pwm)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
		smu->user_dpm_profile.fan_speed_pwm = speed;

		/* Override the custom RPM setting, as the two cannot coexist */
		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
		smu->user_dpm_profile.fan_speed_rpm = 0;
	}

	return ret;
}

static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_fan_speed_rpm)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	return ret;
}

static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_min_dcef_deep_sleep(smu, clk);
}

static int smu_get_clock_by_type_with_latency(void *handle,
					      enum amd_pp_clock_type type,
					      struct pp_clock_levels_with_latency *clocks)
{
	struct smu_context *smu = handle;
	enum smu_clk_type clk_type;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
		switch (type) {
		case amd_pp_sys_clock:
			clk_type = SMU_GFXCLK;
			break;
		case amd_pp_mem_clock:
			clk_type = SMU_MCLK;
			break;
		case amd_pp_dcef_clock:
			clk_type = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_type = SMU_DISPCLK;
			break;
		default:
			dev_err(smu->adev->dev, "Invalid clock type!\n");
			return -EINVAL;
		}

		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
	}

	return ret;
}

static int smu_display_clock_voltage_request(void *handle,
					     struct pp_display_clock_request *clock_req)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	return ret;
}

static int smu_display_disable_memory_clock_switch(void *handle,
						   bool disable_memory_clock_switch)
{
	struct smu_context *smu = handle;
	int ret = -EINVAL;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	return ret;
}

static int smu_set_xgmi_pstate(void *handle,
			       uint32_t pstate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	if (ret)
		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");

	return ret;
}

static int smu_get_baco_capability(void *handle)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled)
		return false;

	if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
		return false;

	return smu->ppt_funcs->get_bamaco_support(smu);
}

static int smu_baco_set_state(void *handle, int state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

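	/* state 1 requests BACO entry, state 0 requests BACO exit */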
	if (state == 0) {
		if (smu->ppt_funcs->baco_exit)
			ret = smu->ppt_funcs->baco_exit(smu);
	} else if (state == 1) {
		if (smu->ppt_funcs->baco_enter)
			ret = smu->ppt_funcs->baco_enter(smu);
	} else {
		return -EINVAL;
	}

	if (ret)
		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			state ? "enter" : "exit");

	return ret;
}

bool smu_mode1_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
		ret = smu->ppt_funcs->mode1_reset_is_support(smu);

	return ret;
}

bool smu_mode2_reset_is_support(struct smu_context *smu)
{
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
		ret = smu->ppt_funcs->mode2_reset_is_support(smu);

	return ret;
}

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_enable_gfx_features(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_gfx_features)
		ret = smu->ppt_funcs->enable_gfx_features(smu);

	if (ret)
		dev_err(smu->adev->dev, "enable gfx features failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
				      size_t size)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_pm_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}

static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
				 size_t *size)
{
	size_t offset = *size;
	int level;

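	/* emit one line per supported level, marking the current one with '*' */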
	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
		if (level == policy->current_level)
			offset += sysfs_emit_at(sysbuf, offset,
						"%d : %s*\n", level,
						policy->desc->get_desc(policy, level));
		else
			offset += sysfs_emit_at(sysbuf, offset,
						"%d : %s\n", level,
						policy->desc->get_desc(policy, level));
	}

	*size = offset;
}

ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}

struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int i;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!policy_ctxt)
		return NULL;

	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
		if (policy_ctxt->policies[i].policy_type == p_type)
			return &policy_ctxt->policies[i];
	}

	return NULL;
}

int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy *dpm_policy = NULL;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int ret = -EOPNOTSUPP;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return ret;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);

	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
		return ret;

	if (dpm_policy->current_level == level)
		return 0;

	ret = dpm_policy->set_policy(smu, level);

	if (!ret)
		dpm_policy->current_level = level;

	return ret;
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.get_pm_metrics = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the allocated buffer is of the correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock the smu mutex, as we access the STB directly
	 * through MMIO and do not go through the SMU messaging route
	 * (for now at least). For register access, rely on the
	 * implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}
3709
#if defined(CONFIG_DEBUG_FS)

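/*
 * Take a snapshot of the STB at open time so repeated reads see a
 * consistent image; the snapshot buffer is freed in the release
 * handler below.
 */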
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

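/*
 * Serve reads from the snapshot taken in the open handler; the VFS may
 * invoke this repeatedly until the whole buffer has been consumed.
 */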
static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only the read method but also open and
 * release: .read returns at most PAGE_SIZE of data per call and is
 * therefore invoked multiple times, so we allocate the STB buffer
 * once in .open and free it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

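/**
 * amdgpu_smu_stb_debug_fs_init - expose the STB through debugfs
 * @adev: the amdgpu device
 *
 * Creates the amdgpu_smu_stb_dump debugfs file when the ASIC provides
 * an STB; a no-op when CONFIG_DEBUG_FS is disabled or the STB size is
 * zero.
 */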
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || !smu->stb_context.stb_buf_size)
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

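/**
 * smu_send_hbm_bad_pages_num - report the bad HBM page count to the SMU
 * @smu: the SMU context
 * @size: number of bad pages to report
 *
 * Forwards the count to the ASIC-specific hook; returns 0 when the hook
 * is not implemented.
 */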
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

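/**
 * smu_send_hbm_bad_channel_flag - report bad HBM channel info to the SMU
 * @smu: the SMU context
 * @size: the bad-channel flag payload
 *
 * Forwards the flag to the ASIC-specific hook; returns 0 when the hook
 * is not implemented.
 */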
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

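/**
 * smu_send_rma_reason - notify the SMU of an RMA condition
 * @smu: the SMU context
 *
 * Forwards the notification to the ASIC-specific hook; returns 0 when
 * the hook is not implemented.
 */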
int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}
