xref: /linux/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c (revision 44343e8b250abb2f6bfd615493ca07a7f11f3cc2)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #define SWSMU_CODE_LAYER_L1
24 
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_6_ppt.h"
46 #include "smu_v13_0_7_ppt.h"
47 #include "smu_v14_0_0_ppt.h"
48 #include "smu_v14_0_2_ppt.h"
49 #include "amd_pcie.h"
50 
51 /*
52  * DO NOT use these for err/warn/info/debug messages.
53  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54  * They are more MGPU friendly.
55  */
56 #undef pr_err
57 #undef pr_warn
58 #undef pr_info
59 #undef pr_debug
60 
61 static const struct amd_pm_funcs swsmu_pm_funcs;
62 static int smu_force_smuclk_levels(struct smu_context *smu,
63 				   enum smu_clk_type clk_type,
64 				   uint32_t mask);
65 static int smu_handle_task(struct smu_context *smu,
66 			   enum amd_dpm_forced_level level,
67 			   enum amd_pp_task task_id);
68 static int smu_reset(struct smu_context *smu);
69 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70 static int smu_set_fan_control_mode(void *handle, u32 value);
71 static int smu_set_power_limit(void *handle, uint32_t limit);
72 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75 static void smu_power_profile_mode_get(struct smu_context *smu,
76 				       enum PP_SMC_POWER_PROFILE profile_mode);
77 static void smu_power_profile_mode_put(struct smu_context *smu,
78 				       enum PP_SMC_POWER_PROFILE profile_mode);
79 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
80 static int smu_od_edit_dpm_table(void *handle,
81 				 enum PP_OD_DPM_TABLE_COMMAND type,
82 				 long *input, uint32_t size);
83 
84 static int smu_sys_get_pp_feature_mask(void *handle,
85 				       char *buf)
86 {
87 	struct smu_context *smu = handle;
88 
89 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
90 		return -EOPNOTSUPP;
91 
92 	return smu_get_pp_feature_mask(smu, buf);
93 }
94 
95 static int smu_sys_set_pp_feature_mask(void *handle,
96 				       uint64_t new_mask)
97 {
98 	struct smu_context *smu = handle;
99 
100 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
101 		return -EOPNOTSUPP;
102 
103 	return smu_set_pp_feature_mask(smu, new_mask);
104 }
105 
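/*
 * GFXOFF residency/entrycount/status helpers: each simply forwards to the
 * ppt backend and returns -EINVAL when the corresponding callback is not
 * implemented.
 */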
106 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
107 {
108 	if (!smu->ppt_funcs->set_gfx_off_residency)
109 		return -EINVAL;
110 
111 	return smu_set_gfx_off_residency(smu, value);
112 }
113 
114 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
115 {
116 	if (!smu->ppt_funcs->get_gfx_off_residency)
117 		return -EINVAL;
118 
119 	return smu_get_gfx_off_residency(smu, value);
120 }
121 
122 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
123 {
124 	if (!smu->ppt_funcs->get_gfx_off_entrycount)
125 		return -EINVAL;
126 
127 	return smu_get_gfx_off_entrycount(smu, value);
128 }
129 
130 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
131 {
132 	if (!smu->ppt_funcs->get_gfx_off_status)
133 		return -EINVAL;
134 
135 	*value = smu_get_gfx_off_status(smu);
136 
137 	return 0;
138 }
139 
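/*
 * Set the user-requested soft min/max frequency for a clock domain. The
 * pp_clock_type is first translated to the internal smu_clk_type; unknown
 * types are rejected with -EINVAL, and a missing backend handler is
 * treated as success.
 */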
140 int smu_set_soft_freq_range(struct smu_context *smu,
141 			    enum pp_clock_type type,
142 			    uint32_t min,
143 			    uint32_t max)
144 {
145 	enum smu_clk_type clk_type;
146 	int ret = 0;
147 
148 	clk_type = smu_convert_to_smuclk(type);
149 	if (clk_type == SMU_CLK_COUNT)
150 		return -EINVAL;
151 
152 	if (smu->ppt_funcs->set_soft_freq_limited_range)
153 		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
154 								  clk_type,
155 								  min,
156 								  max,
157 								  false);
158 
159 	return ret;
160 }
161 
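/*
 * Query the ultimate (hardware-possible) min/max frequency of @clk_type.
 * Either @min or @max may be NULL when only one bound is wanted; -ENOTSUPP
 * is returned when the backend provides no handler.
 */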
162 int smu_get_dpm_freq_range(struct smu_context *smu,
163 			   enum smu_clk_type clk_type,
164 			   uint32_t *min,
165 			   uint32_t *max)
166 {
167 	int ret = -ENOTSUPP;
168 
169 	if (!min && !max)
170 		return -EINVAL;
171 
172 	if (smu->ppt_funcs->get_dpm_ultimate_freq)
173 		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
174 							    clk_type,
175 							    min,
176 							    max);
177 
178 	return ret;
179 }
180 
181 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
182 {
183 	int ret = 0;
184 	struct amdgpu_device *adev = smu->adev;
185 
186 	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
187 		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
188 		if (ret)
189 			dev_err(adev->dev, "Failed to enable gfx imu!\n");
190 	}
191 	return ret;
192 }
193 
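/*
 * smu_get_mclk()/smu_get_sclk() report the lowest (low == true) or highest
 * supported clock, converted from MHz to the 10 kHz units the legacy dpm
 * interfaces expect; 0 is returned on failure.
 */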
194 static u32 smu_get_mclk(void *handle, bool low)
195 {
196 	struct smu_context *smu = handle;
197 	uint32_t clk_freq;
198 	int ret = 0;
199 
200 	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
201 				     low ? &clk_freq : NULL,
202 				     !low ? &clk_freq : NULL);
203 	if (ret)
204 		return 0;
205 	return clk_freq * 100;
206 }
207 
208 static u32 smu_get_sclk(void *handle, bool low)
209 {
210 	struct smu_context *smu = handle;
211 	uint32_t clk_freq;
212 	int ret = 0;
213 
214 	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
215 				     low ? &clk_freq : NULL,
216 				     !low ? &clk_freq : NULL);
217 	if (ret)
218 		return 0;
219 	return clk_freq * 100;
220 }
221 
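/*
 * Powering up gfx via the IMU is only needed when the PSP loads the
 * firmware, and is skipped during GPU reset and S0ix.
 */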
222 static int smu_set_gfx_imu_enable(struct smu_context *smu)
223 {
224 	struct amdgpu_device *adev = smu->adev;
225 
226 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
227 		return 0;
228 
229 	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
230 		return 0;
231 
232 	return smu_set_gfx_power_up_by_imu(smu);
233 }
234 
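/* Check whether every VCN/JPEG IP block instance was actually brought up. */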
235 static bool is_vcn_enabled(struct amdgpu_device *adev)
236 {
237 	int i;
238 
239 	for (i = 0; i < adev->num_ip_blocks; i++) {
240 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
241 			adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
242 			!adev->ip_blocks[i].status.valid)
243 			return false;
244 	}
245 
246 	return true;
247 }
248 
249 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
250 				   bool enable,
251 				   int inst)
252 {
253 	struct smu_power_context *smu_power = &smu->smu_power;
254 	struct smu_power_gate *power_gate = &smu_power->power_gate;
255 	int ret = 0;
256 
257 	/*
258 	 * Don't power on VCN/JPEG when those IP blocks were skipped at init.
259 	 */
260 	if (!is_vcn_enabled(smu->adev))
261 		return 0;
262 
263 	if (!smu->ppt_funcs->dpm_set_vcn_enable)
264 		return 0;
265 
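	/*
	 * The *_gated flag is 1 while the block is gated; bail out early
	 * when the block is already in the requested state. The same
	 * convention is used by the sibling helpers below.
	 */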
266 	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
267 		return 0;
268 
269 	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
270 	if (!ret)
271 		atomic_set(&power_gate->vcn_gated[inst], !enable);
272 
273 	return ret;
274 }
275 
276 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
277 				   bool enable)
278 {
279 	struct smu_power_context *smu_power = &smu->smu_power;
280 	struct smu_power_gate *power_gate = &smu_power->power_gate;
281 	int ret = 0;
282 
283 	if (!is_vcn_enabled(smu->adev))
284 		return 0;
285 
286 	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
287 		return 0;
288 
289 	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
290 		return 0;
291 
292 	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
293 	if (!ret)
294 		atomic_set(&power_gate->jpeg_gated, !enable);
295 
296 	return ret;
297 }
298 
299 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
300 				   bool enable)
301 {
302 	struct smu_power_context *smu_power = &smu->smu_power;
303 	struct smu_power_gate *power_gate = &smu_power->power_gate;
304 	int ret = 0;
305 
306 	if (!smu->ppt_funcs->dpm_set_vpe_enable)
307 		return 0;
308 
309 	if (atomic_read(&power_gate->vpe_gated) ^ enable)
310 		return 0;
311 
312 	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
313 	if (!ret)
314 		atomic_set(&power_gate->vpe_gated, !enable);
315 
316 	return ret;
317 }
318 
319 static int smu_dpm_set_isp_enable(struct smu_context *smu,
320 				  bool enable)
321 {
322 	struct smu_power_context *smu_power = &smu->smu_power;
323 	struct smu_power_gate *power_gate = &smu_power->power_gate;
324 	int ret;
325 
326 	if (!smu->ppt_funcs->dpm_set_isp_enable)
327 		return 0;
328 
329 	if (atomic_read(&power_gate->isp_gated) ^ enable)
330 		return 0;
331 
332 	ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
333 	if (!ret)
334 		atomic_set(&power_gate->isp_gated, !enable);
335 
336 	return ret;
337 }
338 
339 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
340 				   bool enable)
341 {
342 	struct smu_power_context *smu_power = &smu->smu_power;
343 	struct smu_power_gate *power_gate = &smu_power->power_gate;
344 	int ret = 0;
345 
346 	if (!smu->adev->enable_umsch_mm)
347 		return 0;
348 
349 	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
350 		return 0;
351 
352 	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
353 		return 0;
354 
355 	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
356 	if (!ret)
357 		atomic_set(&power_gate->umsch_mm_gated, !enable);
358 
359 	return ret;
360 }
361 
362 static int smu_set_mall_enable(struct smu_context *smu)
363 {
364 	int ret = 0;
365 
366 	if (!smu->ppt_funcs->set_mall_enable)
367 		return 0;
368 
369 	ret = smu->ppt_funcs->set_mall_enable(smu);
370 
371 	return ret;
372 }
373 
374 /**
375  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
376  *
377  * @handle:        smu_context pointer
378  * @block_type:    the IP block to power gate/ungate
379  * @gate:          to power gate if true, ungate otherwise
380  * @inst:          the instance of the IP block to power gate/ungate
381  *
382  * This API takes no smu->mutex lock protection because:
383  * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
384  *    callers guarantee those calls are race condition free.
385  * 2. Or it is called on a user setting request of power_dpm_force_performance_level.
386  *    In that case, the smu->mutex lock protection is already enforced on
387  *    the parent API smu_force_performance_level of the call path.
388  */
389 static int smu_dpm_set_power_gate(void *handle,
390 				  uint32_t block_type,
391 				  bool gate,
392 				  int inst)
393 {
394 	struct smu_context *smu = handle;
395 	int ret = 0;
396 
397 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
398 		dev_WARN(smu->adev->dev,
399 			 "SMU uninitialized but power %s requested for %u!\n",
400 			 gate ? "gate" : "ungate", block_type);
401 		return -EOPNOTSUPP;
402 	}
403 
404 	switch (block_type) {
405 	/*
406 	 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
407 	 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both block types are handled here.
408 	 */
409 	case AMD_IP_BLOCK_TYPE_UVD:
410 	case AMD_IP_BLOCK_TYPE_VCN:
411 		ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
412 		if (ret)
413 			dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
414 				gate ? "gate" : "ungate", inst);
415 		break;
416 	case AMD_IP_BLOCK_TYPE_GFX:
417 		ret = smu_gfx_off_control(smu, gate);
418 		if (ret)
419 			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
420 				gate ? "enable" : "disable");
421 		break;
422 	case AMD_IP_BLOCK_TYPE_SDMA:
423 		ret = smu_powergate_sdma(smu, gate);
424 		if (ret)
425 			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
426 				gate ? "gate" : "ungate");
427 		break;
428 	case AMD_IP_BLOCK_TYPE_JPEG:
429 		ret = smu_dpm_set_jpeg_enable(smu, !gate);
430 		if (ret)
431 			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
432 				gate ? "gate" : "ungate");
433 		break;
434 	case AMD_IP_BLOCK_TYPE_VPE:
435 		ret = smu_dpm_set_vpe_enable(smu, !gate);
436 		if (ret)
437 			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
438 				gate ? "gate" : "ungate");
439 		break;
440 	case AMD_IP_BLOCK_TYPE_ISP:
441 		ret = smu_dpm_set_isp_enable(smu, !gate);
442 		if (ret)
443 			dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
444 				gate ? "gate" : "ungate");
445 		break;
446 	default:
447 		dev_err(smu->adev->dev, "Unsupported block type!\n");
448 		return -EINVAL;
449 	}
450 
451 	return ret;
452 }
453 
454 /**
455  * smu_set_user_clk_dependencies - set user profile clock dependencies
456  *
457  * @smu:	smu_context pointer
458  * @clk:	enum smu_clk_type type
459  *
460  * Enable/Disable the clock dependency for the @clk type.
461  */
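 * For example, a user request on MCLK marks FCLK and SOCCLK as dependent,
 * so smu_restore_dpm_user_profile() will not force their saved levels
 * separately.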
462 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
463 {
464 	if (smu->adev->in_suspend)
465 		return;
466 
467 	if (clk == SMU_MCLK) {
468 		smu->user_dpm_profile.clk_dependency = 0;
469 		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
470 	} else if (clk == SMU_FCLK) {
471 		/* MCLK takes precedence over FCLK */
472 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
473 			return;
474 
475 		smu->user_dpm_profile.clk_dependency = 0;
476 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
477 	} else if (clk == SMU_SOCCLK) {
478 		/* MCLK takes precedence over SOCCLK */
479 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
480 			return;
481 
482 		smu->user_dpm_profile.clk_dependency = 0;
483 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
484 	} else
485 		/* Add clk dependencies here, if any */
486 		return;
487 }
488 
489 /**
490  * smu_restore_dpm_user_profile - reinstate user dpm profile
491  *
492  * @smu:	smu_context pointer
493  *
494  * Restore the saved user power configurations, including power limit,
495  * clock frequencies, fan control mode and fan speed.
496  */
497 static void smu_restore_dpm_user_profile(struct smu_context *smu)
498 {
499 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
500 	int ret = 0;
501 
502 	if (!smu->adev->in_suspend)
503 		return;
504 
505 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
506 		return;
507 
508 	/* Enable restore flag */
509 	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
510 
511 	/* set the user dpm power limit */
512 	if (smu->user_dpm_profile.power_limit) {
513 		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
514 		if (ret)
515 			dev_err(smu->adev->dev, "Failed to set power limit value\n");
516 	}
517 
518 	/* set the user dpm clock configurations */
519 	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
520 		enum smu_clk_type clk_type;
521 
522 		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
523 			/*
524 			 * Iterate over the smu clk types and force the saved user clk
525 			 * level masks; skip any clock whose dependency flag is set.
526 			 */
527 			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
528 					smu->user_dpm_profile.clk_mask[clk_type]) {
529 				ret = smu_force_smuclk_levels(smu, clk_type,
530 						smu->user_dpm_profile.clk_mask[clk_type]);
531 				if (ret)
532 					dev_err(smu->adev->dev,
533 						"Failed to set clock type = %d\n", clk_type);
534 			}
535 		}
536 	}
537 
538 	/* set the user dpm fan configurations */
539 	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
540 	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
541 		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
542 		if (ret != -EOPNOTSUPP) {
543 			smu->user_dpm_profile.fan_speed_pwm = 0;
544 			smu->user_dpm_profile.fan_speed_rpm = 0;
545 			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
546 			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
547 		}
548 
549 		if (smu->user_dpm_profile.fan_speed_pwm) {
550 			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
551 			if (ret != -EOPNOTSUPP)
552 				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
553 		}
554 
555 		if (smu->user_dpm_profile.fan_speed_rpm) {
556 			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
557 			if (ret != -EOPNOTSUPP)
558 				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
559 		}
560 	}
561 
562 	/* Restore user customized OD settings */
563 	if (smu->user_dpm_profile.user_od) {
564 		if (smu->ppt_funcs->restore_user_od_settings) {
565 			ret = smu->ppt_funcs->restore_user_od_settings(smu);
566 			if (ret)
567 				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
568 		}
569 	}
570 
571 	/* Disable restore flag */
572 	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
573 }
574 
575 static int smu_get_power_num_states(void *handle,
576 				    struct pp_states_info *state_info)
577 {
578 	if (!state_info)
579 		return -EINVAL;
580 
581 	/* power states are not supported; report a single default state */
582 	memset(state_info, 0, sizeof(struct pp_states_info));
583 	state_info->nums = 1;
584 	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
585 
586 	return 0;
587 }
588 
589 bool is_support_sw_smu(struct amdgpu_device *adev)
590 {
591 	/* vega20 is 11.0.2, but it's supported via the powerplay code */
592 	if (adev->asic_type == CHIP_VEGA20)
593 		return false;
594 
595 	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
596 	    amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
597 		return true;
598 
599 	return false;
600 }
601 
602 bool is_support_cclk_dpm(struct amdgpu_device *adev)
603 {
604 	struct smu_context *smu = adev->powerplay.pp_handle;
605 
606 	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
607 		return false;
608 
609 	return true;
610 }
611 
612 
613 static int smu_sys_get_pp_table(void *handle,
614 				char **table)
615 {
616 	struct smu_context *smu = handle;
617 	struct smu_table_context *smu_table = &smu->smu_table;
618 
619 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
620 		return -EOPNOTSUPP;
621 
622 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
623 		return -EINVAL;
624 
625 	if (smu_table->hardcode_pptable)
626 		*table = smu_table->hardcode_pptable;
627 	else
628 		*table = smu_table->power_play_table;
629 
630 	return smu_table->power_play_table_size;
631 }
632 
633 static int smu_sys_set_pp_table(void *handle,
634 				const char *buf,
635 				size_t size)
636 {
637 	struct smu_context *smu = handle;
638 	struct smu_table_context *smu_table = &smu->smu_table;
639 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
640 	int ret = 0;
641 
642 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
643 		return -EOPNOTSUPP;
644 
645 	if (header->usStructureSize != size) {
646 		dev_err(smu->adev->dev, "pp table size not matched!\n");
647 		return -EIO;
648 	}
649 
650 	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
651 		kfree(smu_table->hardcode_pptable);
652 		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
653 		if (!smu_table->hardcode_pptable)
654 			return -ENOMEM;
655 	}
656 
657 	memcpy(smu_table->hardcode_pptable, buf, size);
658 	smu_table->power_play_table = smu_table->hardcode_pptable;
659 	smu_table->power_play_table_size = size;
660 
661 	/*
662 	 * A special hw_fini action (for Navi1x, the DPM disablement will be
663 	 * skipped) may be needed for custom pptable uploading.
664 	 */
665 	smu->uploading_custom_pp_table = true;
666 
667 	ret = smu_reset(smu);
668 	if (ret)
669 		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
670 
671 	smu->uploading_custom_pp_table = false;
672 
673 	return ret;
674 }
675 
676 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
677 {
678 	struct smu_feature *feature = &smu->smu_feature;
679 	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
680 	int ret = 0;
681 
682 	/*
683 	 * With SCPM enabled, setting the allowed feature masks (via
684 	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
685 	 * That means there is no way to let the PMFW know the settings below.
686 	 * Thus, we just assume all the features are allowed in such a
687 	 * scenario.
688 	 */
689 	if (smu->adev->scpm_enabled) {
690 		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
691 		return 0;
692 	}
693 
694 	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
695 
696 	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
697 					     SMU_FEATURE_MAX/32);
698 	if (ret)
699 		return ret;
700 
701 	bitmap_or(feature->allowed, feature->allowed,
702 		      (unsigned long *)allowed_feature_mask,
703 		      feature->feature_num);
704 
705 	return ret;
706 }
707 
708 static int smu_set_funcs(struct amdgpu_device *adev)
709 {
710 	struct smu_context *smu = adev->powerplay.pp_handle;
711 
712 	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
713 		smu->od_enabled = true;
714 
715 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
716 	case IP_VERSION(11, 0, 0):
717 	case IP_VERSION(11, 0, 5):
718 	case IP_VERSION(11, 0, 9):
719 		navi10_set_ppt_funcs(smu);
720 		break;
721 	case IP_VERSION(11, 0, 7):
722 	case IP_VERSION(11, 0, 11):
723 	case IP_VERSION(11, 0, 12):
724 	case IP_VERSION(11, 0, 13):
725 		sienna_cichlid_set_ppt_funcs(smu);
726 		break;
727 	case IP_VERSION(12, 0, 0):
728 	case IP_VERSION(12, 0, 1):
729 		renoir_set_ppt_funcs(smu);
730 		break;
731 	case IP_VERSION(11, 5, 0):
732 	case IP_VERSION(11, 5, 2):
733 		vangogh_set_ppt_funcs(smu);
734 		break;
735 	case IP_VERSION(13, 0, 1):
736 	case IP_VERSION(13, 0, 3):
737 	case IP_VERSION(13, 0, 8):
738 		yellow_carp_set_ppt_funcs(smu);
739 		break;
740 	case IP_VERSION(13, 0, 4):
741 	case IP_VERSION(13, 0, 11):
742 		smu_v13_0_4_set_ppt_funcs(smu);
743 		break;
744 	case IP_VERSION(13, 0, 5):
745 		smu_v13_0_5_set_ppt_funcs(smu);
746 		break;
747 	case IP_VERSION(11, 0, 8):
748 		cyan_skillfish_set_ppt_funcs(smu);
749 		break;
750 	case IP_VERSION(11, 0, 2):
751 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
752 		arcturus_set_ppt_funcs(smu);
753 		/* OD is not supported on Arcturus */
754 		smu->od_enabled = false;
755 		break;
756 	case IP_VERSION(13, 0, 2):
757 		aldebaran_set_ppt_funcs(smu);
758 		/* Enable pp_od_clk_voltage node */
759 		smu->od_enabled = true;
760 		break;
761 	case IP_VERSION(13, 0, 0):
762 	case IP_VERSION(13, 0, 10):
763 		smu_v13_0_0_set_ppt_funcs(smu);
764 		break;
765 	case IP_VERSION(13, 0, 6):
766 	case IP_VERSION(13, 0, 14):
767 	case IP_VERSION(13, 0, 12):
768 		smu_v13_0_6_set_ppt_funcs(smu);
769 		/* Enable pp_od_clk_voltage node */
770 		smu->od_enabled = true;
771 		break;
772 	case IP_VERSION(13, 0, 7):
773 		smu_v13_0_7_set_ppt_funcs(smu);
774 		break;
775 	case IP_VERSION(14, 0, 0):
776 	case IP_VERSION(14, 0, 1):
777 	case IP_VERSION(14, 0, 4):
778 	case IP_VERSION(14, 0, 5):
779 		smu_v14_0_0_set_ppt_funcs(smu);
780 		break;
781 	case IP_VERSION(14, 0, 2):
782 	case IP_VERSION(14, 0, 3):
783 		smu_v14_0_2_set_ppt_funcs(smu);
784 		break;
785 	default:
786 		return -EINVAL;
787 	}
788 
789 	return 0;
790 }
791 
792 static int smu_early_init(struct amdgpu_ip_block *ip_block)
793 {
794 	struct amdgpu_device *adev = ip_block->adev;
795 	struct smu_context *smu;
796 	int r;
797 
798 	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
799 	if (!smu)
800 		return -ENOMEM;
801 
802 	smu->adev = adev;
803 	smu->pm_enabled = !!amdgpu_dpm;
804 	smu->is_apu = false;
805 	smu->smu_baco.state = SMU_BACO_STATE_NONE;
806 	smu->smu_baco.platform_support = false;
807 	smu->smu_baco.maco_support = false;
808 	smu->user_dpm_profile.fan_mode = -1;
809 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;
810 
811 	mutex_init(&smu->message_lock);
812 
813 	adev->powerplay.pp_handle = smu;
814 	adev->powerplay.pp_funcs = &swsmu_pm_funcs;
815 
816 	r = smu_set_funcs(adev);
817 	if (r)
818 		return r;
819 	return smu_init_microcode(smu);
820 }
821 
822 static int smu_set_default_dpm_table(struct smu_context *smu)
823 {
824 	struct amdgpu_device *adev = smu->adev;
825 	struct smu_power_context *smu_power = &smu->smu_power;
826 	struct smu_power_gate *power_gate = &smu_power->power_gate;
827 	int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
828 	int ret = 0;
829 
830 	if (!smu->ppt_funcs->set_default_dpm_table)
831 		return 0;
832 
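	/*
	 * Save the current VCN/JPEG power gate state, power the blocks on
	 * temporarily so the backend can probe their DPM clock tables, and
	 * restore the previous gate state afterwards.
	 */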
833 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
834 		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
835 			vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
836 	}
837 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
838 		jpeg_gate = atomic_read(&power_gate->jpeg_gated);
839 
840 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
841 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
842 			ret = smu_dpm_set_vcn_enable(smu, true, i);
843 			if (ret)
844 				return ret;
845 		}
846 	}
847 
848 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
849 		ret = smu_dpm_set_jpeg_enable(smu, true);
850 		if (ret)
851 			goto err_out;
852 	}
853 
854 	ret = smu->ppt_funcs->set_default_dpm_table(smu);
855 	if (ret)
856 		dev_err(smu->adev->dev,
857 			"Failed to setup default dpm clock tables!\n");
858 
859 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
860 		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
861 err_out:
862 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
863 		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
864 			smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
865 	}
866 
867 	return ret;
868 }
869 
870 static int smu_apply_default_config_table_settings(struct smu_context *smu)
871 {
872 	struct amdgpu_device *adev = smu->adev;
873 	int ret = 0;
874 
875 	ret = smu_get_default_config_table_settings(smu,
876 						    &adev->pm.config_table);
877 	if (ret)
878 		return ret;
879 
880 	return smu_set_config_table(smu, &adev->pm.config_table);
881 }
882 
883 static int smu_late_init(struct amdgpu_ip_block *ip_block)
884 {
885 	struct amdgpu_device *adev = ip_block->adev;
886 	struct smu_context *smu = adev->powerplay.pp_handle;
887 	int ret = 0;
888 
889 	smu_set_fine_grain_gfx_freq_parameters(smu);
890 
891 	if (!smu->pm_enabled)
892 		return 0;
893 
894 	ret = smu_post_init(smu);
895 	if (ret) {
896 		dev_err(adev->dev, "Failed to post smu init!\n");
897 		return ret;
898 	}
899 
900 	/*
901 	 * Explicitly notify the PMFW of the power mode the system is in,
902 	 * since the PMFW may have booted the ASIC with a different mode.
903 	 * For ASICs supporting AC/DC switch via gpio, the PMFW will
904 	 * handle the switch automatically. Driver involvement
905 	 * is unnecessary.
906 	 */
907 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
908 	smu_set_ac_dc(smu);
909 
910 	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
911 	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
912 		return 0;
913 
914 	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
915 		ret = smu_set_default_od_settings(smu);
916 		if (ret) {
917 			dev_err(adev->dev, "Failed to setup default OD settings!\n");
918 			return ret;
919 		}
920 	}
921 
922 	ret = smu_populate_umd_state_clk(smu);
923 	if (ret) {
924 		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
925 		return ret;
926 	}
927 
928 	ret = smu_get_asic_power_limits(smu,
929 					&smu->current_power_limit,
930 					&smu->default_power_limit,
931 					&smu->max_power_limit,
932 					&smu->min_power_limit);
933 	if (ret) {
934 		dev_err(adev->dev, "Failed to get asic power limits!\n");
935 		return ret;
936 	}
937 
938 	if (!amdgpu_sriov_vf(adev))
939 		smu_get_unique_id(smu);
940 
941 	smu_get_fan_parameters(smu);
942 
943 	smu_handle_task(smu,
944 			smu->smu_dpm.dpm_level,
945 			AMD_PP_TASK_COMPLETE_INIT);
946 
947 	ret = smu_apply_default_config_table_settings(smu);
948 	if (ret && (ret != -EOPNOTSUPP)) {
949 		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
950 		return ret;
951 	}
952 
953 	smu_restore_dpm_user_profile(smu);
954 
955 	return 0;
956 }
957 
958 static int smu_init_fb_allocations(struct smu_context *smu)
959 {
960 	struct amdgpu_device *adev = smu->adev;
961 	struct smu_table_context *smu_table = &smu->smu_table;
962 	struct smu_table *tables = smu_table->tables;
963 	struct smu_table *driver_table = &(smu_table->driver_table);
964 	uint32_t max_table_size = 0;
965 	int ret, i;
966 
967 	/* VRAM allocation for tool table */
968 	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
969 		ret = amdgpu_bo_create_kernel(adev,
970 					      tables[SMU_TABLE_PMSTATUSLOG].size,
971 					      tables[SMU_TABLE_PMSTATUSLOG].align,
972 					      tables[SMU_TABLE_PMSTATUSLOG].domain,
973 					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
974 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
975 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
976 		if (ret) {
977 			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
978 			return ret;
979 		}
980 	}
981 
982 	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
983 	/* VRAM allocation for driver table */
984 	for (i = 0; i < SMU_TABLE_COUNT; i++) {
985 		if (tables[i].size == 0)
986 			continue;
987 
988 		/* If one of the tables has VRAM domain restriction, keep it in
989 		 * VRAM
990 		 */
991 		if ((tables[i].domain &
992 		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
993 			    AMDGPU_GEM_DOMAIN_VRAM)
994 			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
995 
996 		if (i == SMU_TABLE_PMSTATUSLOG)
997 			continue;
998 
999 		if (max_table_size < tables[i].size)
1000 			max_table_size = tables[i].size;
1001 	}
1002 
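	/*
	 * The driver table is a single staging buffer for SMU table
	 * transfers, so size it for the largest table it may carry (the
	 * tool table has its own dedicated BO and is excluded above).
	 */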
1003 	driver_table->size = max_table_size;
1004 	driver_table->align = PAGE_SIZE;
1005 
1006 	ret = amdgpu_bo_create_kernel(adev,
1007 				      driver_table->size,
1008 				      driver_table->align,
1009 				      driver_table->domain,
1010 				      &driver_table->bo,
1011 				      &driver_table->mc_address,
1012 				      &driver_table->cpu_addr);
1013 	if (ret) {
1014 		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
1015 		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1016 			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1017 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1018 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1019 	}
1020 
1021 	return ret;
1022 }
1023 
1024 static int smu_fini_fb_allocations(struct smu_context *smu)
1025 {
1026 	struct smu_table_context *smu_table = &smu->smu_table;
1027 	struct smu_table *tables = smu_table->tables;
1028 	struct smu_table *driver_table = &(smu_table->driver_table);
1029 
1030 	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
1031 		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
1032 				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
1033 				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
1034 
1035 	amdgpu_bo_free_kernel(&driver_table->bo,
1036 			      &driver_table->mc_address,
1037 			      &driver_table->cpu_addr);
1038 
1039 	return 0;
1040 }
1041 
1042 static void smu_update_gpu_addresses(struct smu_context *smu)
1043 {
1044 	struct smu_table_context *smu_table = &smu->smu_table;
1045 	struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
1046 	struct smu_table *driver_table = &(smu_table->driver_table);
1047 	struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;
1048 
1049 	if (pm_status_table->bo)
1050 		pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
1051 	if (driver_table->bo)
1052 		driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
1053 	if (dummy_read_1_table->bo)
1054 		dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
1055 }
1056 
1057 /**
1058  * smu_alloc_memory_pool - allocate the SMU memory pool
1059  *
1060  * @smu: smu_context pointer
1061  *
1062  * This memory pool is for SMC use; the SetSystemVirtualDramAddr and
1063  * DramLogSetDramAddr messages notify the SMC of its location.
1064  *
1065  * Returns 0 on success, error on failure.
1066  */
1067 static int smu_alloc_memory_pool(struct smu_context *smu)
1068 {
1069 	struct amdgpu_device *adev = smu->adev;
1070 	struct smu_table_context *smu_table = &smu->smu_table;
1071 	struct smu_table *memory_pool = &smu_table->memory_pool;
1072 	uint64_t pool_size = smu->pool_size;
1073 	int ret = 0;
1074 
1075 	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1076 		return ret;
1077 
1078 	memory_pool->size = pool_size;
1079 	memory_pool->align = PAGE_SIZE;
1080 	memory_pool->domain =
1081 		(adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
1082 			AMDGPU_GEM_DOMAIN_VRAM :
1083 			AMDGPU_GEM_DOMAIN_GTT;
1084 
1085 	switch (pool_size) {
1086 	case SMU_MEMORY_POOL_SIZE_256_MB:
1087 	case SMU_MEMORY_POOL_SIZE_512_MB:
1088 	case SMU_MEMORY_POOL_SIZE_1_GB:
1089 	case SMU_MEMORY_POOL_SIZE_2_GB:
1090 		ret = amdgpu_bo_create_kernel(adev,
1091 					      memory_pool->size,
1092 					      memory_pool->align,
1093 					      memory_pool->domain,
1094 					      &memory_pool->bo,
1095 					      &memory_pool->mc_address,
1096 					      &memory_pool->cpu_addr);
1097 		if (ret)
1098 			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
1099 		break;
1100 	default:
1101 		break;
1102 	}
1103 
1104 	return ret;
1105 }
1106 
1107 static int smu_free_memory_pool(struct smu_context *smu)
1108 {
1109 	struct smu_table_context *smu_table = &smu->smu_table;
1110 	struct smu_table *memory_pool = &smu_table->memory_pool;
1111 
1112 	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1113 		return 0;
1114 
1115 	amdgpu_bo_free_kernel(&memory_pool->bo,
1116 			      &memory_pool->mc_address,
1117 			      &memory_pool->cpu_addr);
1118 
1119 	memset(memory_pool, 0, sizeof(struct smu_table));
1120 
1121 	return 0;
1122 }
1123 
1124 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1125 {
1126 	struct smu_table_context *smu_table = &smu->smu_table;
1127 	struct smu_table *dummy_read_1_table =
1128 			&smu_table->dummy_read_1_table;
1129 	struct amdgpu_device *adev = smu->adev;
1130 	int ret = 0;
1131 
1132 	if (!dummy_read_1_table->size)
1133 		return 0;
1134 
1135 	ret = amdgpu_bo_create_kernel(adev,
1136 				      dummy_read_1_table->size,
1137 				      dummy_read_1_table->align,
1138 				      dummy_read_1_table->domain,
1139 				      &dummy_read_1_table->bo,
1140 				      &dummy_read_1_table->mc_address,
1141 				      &dummy_read_1_table->cpu_addr);
1142 	if (ret)
1143 		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1144 
1145 	return ret;
1146 }
1147 
1148 static void smu_free_dummy_read_table(struct smu_context *smu)
1149 {
1150 	struct smu_table_context *smu_table = &smu->smu_table;
1151 	struct smu_table *dummy_read_1_table =
1152 			&smu_table->dummy_read_1_table;
1153 
1154 
1155 	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1156 			      &dummy_read_1_table->mc_address,
1157 			      &dummy_read_1_table->cpu_addr);
1158 
1159 	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1160 }
1161 
1162 static int smu_smc_table_sw_init(struct smu_context *smu)
1163 {
1164 	int ret;
1165 
1166 	/**
1167 	 * Create smu_table structure, and init smc tables such as
1168 	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1169 	 */
1170 	ret = smu_init_smc_tables(smu);
1171 	if (ret) {
1172 		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1173 		return ret;
1174 	}
1175 
1176 	/**
1177 	 * Create the smu_power_context structure, and allocate the
1178 	 * smu_dpm_context within it to fill the smu_power_context data.
1179 	 */
1180 	ret = smu_init_power(smu);
1181 	if (ret) {
1182 		dev_err(smu->adev->dev, "Failed to init smu power!\n");
1183 		return ret;
1184 	}
1185 
1186 	/*
1187 	 * allocate vram bos to store smc table contents.
1188 	 */
1189 	ret = smu_init_fb_allocations(smu);
1190 	if (ret)
1191 		return ret;
1192 
1193 	ret = smu_alloc_memory_pool(smu);
1194 	if (ret)
1195 		return ret;
1196 
1197 	ret = smu_alloc_dummy_read_table(smu);
1198 	if (ret)
1199 		return ret;
1200 
1201 	ret = smu_i2c_init(smu);
1202 	if (ret)
1203 		return ret;
1204 
1205 	return 0;
1206 }
1207 
1208 static int smu_smc_table_sw_fini(struct smu_context *smu)
1209 {
1210 	int ret;
1211 
1212 	smu_i2c_fini(smu);
1213 
1214 	smu_free_dummy_read_table(smu);
1215 
1216 	ret = smu_free_memory_pool(smu);
1217 	if (ret)
1218 		return ret;
1219 
1220 	ret = smu_fini_fb_allocations(smu);
1221 	if (ret)
1222 		return ret;
1223 
1224 	ret = smu_fini_power(smu);
1225 	if (ret) {
1226 		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
1227 		return ret;
1228 	}
1229 
1230 	ret = smu_fini_smc_tables(smu);
1231 	if (ret) {
1232 		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
1233 		return ret;
1234 	}
1235 
1236 	return 0;
1237 }
1238 
1239 static void smu_throttling_logging_work_fn(struct work_struct *work)
1240 {
1241 	struct smu_context *smu = container_of(work, struct smu_context,
1242 					       throttling_logging_work);
1243 
1244 	smu_log_thermal_throttling(smu);
1245 }
1246 
1247 static void smu_interrupt_work_fn(struct work_struct *work)
1248 {
1249 	struct smu_context *smu = container_of(work, struct smu_context,
1250 					       interrupt_work);
1251 
1252 	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1253 		smu->ppt_funcs->interrupt_work(smu);
1254 }
1255 
1256 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1257 {
1258 	struct smu_context *smu =
1259 		container_of(work, struct smu_context, swctf_delayed_work.work);
1260 	struct smu_temperature_range *range =
1261 				&smu->thermal_range;
1262 	struct amdgpu_device *adev = smu->adev;
1263 	uint32_t hotspot_tmp, size;
1264 
1265 	/*
1266 	 * If, after the enforced delay, the hotspot temperature is confirmed
1267 	 * to be below the SW CTF set point, nothing will be done.
1268 	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1269 	 */
1270 	if (range->software_shutdown_temp &&
1271 	    smu->ppt_funcs->read_sensor &&
1272 	    !smu->ppt_funcs->read_sensor(smu,
1273 					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1274 					 &hotspot_tmp,
1275 					 &size) &&
1276 	    hotspot_tmp / 1000 < range->software_shutdown_temp)
1277 		return;
1278 
1279 	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
1280 	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
1281 	orderly_poweroff(true);
1282 }
1283 
1284 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1285 {
1286 	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1287 	struct smu_dpm_policy_ctxt *policy_ctxt;
1288 	struct smu_dpm_policy *policy;
1289 
1290 	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1291 	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1292 		if (policy)
1293 			policy->current_level = XGMI_PLPD_DEFAULT;
1294 		return;
1295 	}
1296 
1297 	/* PMFW put PLPD into default policy after enabling the feature */
1298 	if (smu_feature_is_enabled(smu,
1299 				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1300 		if (policy)
1301 			policy->current_level = XGMI_PLPD_DEFAULT;
1302 	} else {
1303 		policy_ctxt = dpm_ctxt->dpm_policies;
1304 		if (policy_ctxt)
1305 			policy_ctxt->policy_mask &=
1306 				~BIT(PP_PM_POLICY_XGMI_PLPD);
1307 	}
1308 }
1309 
1310 static void smu_init_power_profile(struct smu_context *smu)
1311 {
1312 	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
1313 		smu->power_profile_mode =
1314 			PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1315 	smu_power_profile_mode_get(smu, smu->power_profile_mode);
1316 }
1317 
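/*
 * Feature-capability bits record optional capabilities discovered at init
 * time: smu_feature_cap_set() marks a capability as present and
 * smu_feature_cap_test() queries it.
 */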
1318 void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
1319 {
1320 	struct smu_feature_cap *fea_cap = &smu->fea_cap;
1321 
1322 	if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
1323 		return;
1324 
1325 	set_bit(fea_id, fea_cap->cap_map);
1326 }
1327 
1328 bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
1329 {
1330 	struct smu_feature_cap *fea_cap = &smu->fea_cap;
1331 
1332 	if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
1333 		return false;
1334 
1335 	return test_bit(fea_id, fea_cap->cap_map);
1336 }
1337 
1338 static void smu_feature_cap_init(struct smu_context *smu)
1339 {
1340 	struct smu_feature_cap *fea_cap = &smu->fea_cap;
1341 
1342 	bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
1343 }
1344 
1345 static int smu_sw_init(struct amdgpu_ip_block *ip_block)
1346 {
1347 	struct amdgpu_device *adev = ip_block->adev;
1348 	struct smu_context *smu = adev->powerplay.pp_handle;
1349 	int i, ret;
1350 
1351 	smu->pool_size = adev->pm.smu_prv_buffer_size;
1352 	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1353 	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1354 	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1355 
1356 	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1357 	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1358 	atomic64_set(&smu->throttle_int_counter, 0);
1359 	smu->watermarks_bitmap = 0;
1360 
1361 	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1362 		atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
1363 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1364 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1365 	atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
1366 	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1367 
1368 	smu_init_power_profile(smu);
1369 	smu->display_config = &adev->pm.pm_display_cfg;
1370 
1371 	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1372 	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1373 
1374 	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1375 			  smu_swctf_delayed_work_handler);
1376 
1377 	smu_feature_cap_init(smu);
1378 
1379 	ret = smu_smc_table_sw_init(smu);
1380 	if (ret) {
1381 		dev_err(adev->dev, "Failed to sw init smc table!\n");
1382 		return ret;
1383 	}
1384 
1385 	/* get boot_values from the vbios to set revision, gfxclk, etc. */
1386 	ret = smu_get_vbios_bootup_values(smu);
1387 	if (ret) {
1388 		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1389 		return ret;
1390 	}
1391 
1392 	ret = smu_init_pptable_microcode(smu);
1393 	if (ret) {
1394 		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1395 		return ret;
1396 	}
1397 
1398 	ret = smu_register_irq_handler(smu);
1399 	if (ret) {
1400 		dev_err(adev->dev, "Failed to register smc irq handler!\n");
1401 		return ret;
1402 	}
1403 
1404 	/* If there is no way to query fan control mode, fan control is not supported */
1405 	if (!smu->ppt_funcs->get_fan_control_mode)
1406 		smu->adev->pm.no_fan = true;
1407 
1408 	return 0;
1409 }
1410 
1411 static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
1412 {
1413 	struct amdgpu_device *adev = ip_block->adev;
1414 	struct smu_context *smu = adev->powerplay.pp_handle;
1415 	int ret;
1416 
1417 	ret = smu_smc_table_sw_fini(smu);
1418 	if (ret) {
1419 		dev_err(adev->dev, "Failed to sw fini smc table!\n");
1420 		return ret;
1421 	}
1422 
1423 	if (smu->custom_profile_params) {
1424 		kfree(smu->custom_profile_params);
1425 		smu->custom_profile_params = NULL;
1426 	}
1427 
1428 	smu_fini_microcode(smu);
1429 
1430 	return 0;
1431 }
1432 
1433 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1434 {
1435 	struct amdgpu_device *adev = smu->adev;
1436 	struct smu_temperature_range *range =
1437 				&smu->thermal_range;
1438 	int ret = 0;
1439 
1440 	if (!smu->ppt_funcs->get_thermal_temperature_range)
1441 		return 0;
1442 
1443 	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1444 	if (ret)
1445 		return ret;
1446 
1447 	adev->pm.dpm.thermal.min_temp = range->min;
1448 	adev->pm.dpm.thermal.max_temp = range->max;
1449 	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1450 	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1451 	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1452 	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1453 	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1454 	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1455 	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1456 
1457 	return ret;
1458 }
1459 
1460 /**
1461  * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1462  *
1463  * @smu: smu_context pointer
1464  *
1465  * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1466  * Returns 0 on success, error on failure.
1467  */
1468 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1469 {
1470 	struct wbrf_ranges_in_out wbrf_exclusion = {0};
1471 	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1472 	struct amdgpu_device *adev = smu->adev;
1473 	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1474 	uint64_t start, end;
1475 	int ret, i, j;
1476 
1477 	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1478 	if (ret) {
1479 		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1480 		return ret;
1481 	}
1482 
1483 	/*
1484 	 * The exclusion ranges array we got might be filled with holes and duplicate
1485 	 * entries. For example:
1486 	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1487 	 * We need to do some cleanup to eliminate those holes and duplicate entries.
1488 	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1489 	 */
1490 	for (i = 0; i < num_of_wbrf_ranges; i++) {
1491 		start = wifi_bands[i].start;
1492 		end = wifi_bands[i].end;
1493 
1494 		/* get the last valid entry to fill the intermediate hole */
1495 		if (!start && !end) {
1496 			for (j = num_of_wbrf_ranges - 1; j > i; j--)
1497 				if (wifi_bands[j].start && wifi_bands[j].end)
1498 					break;
1499 
1500 			/* no valid entry left */
1501 			if (j <= i)
1502 				break;
1503 
1504 			start = wifi_bands[i].start = wifi_bands[j].start;
1505 			end = wifi_bands[i].end = wifi_bands[j].end;
1506 			wifi_bands[j].start = 0;
1507 			wifi_bands[j].end = 0;
1508 			num_of_wbrf_ranges = j;
1509 		}
1510 
1511 		/* eliminate duplicate entries */
1512 		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1513 			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1514 				wifi_bands[j].start = 0;
1515 				wifi_bands[j].end = 0;
1516 			}
1517 		}
1518 	}
1519 
1520 	/* Send the sorted wifi_bands to PMFW */
1521 	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1522 	/* Retry once if the PMFW was busy */
1523 	if (unlikely(ret == -EBUSY)) {
1524 		mdelay(5);
1525 		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1526 	}
1527 
1528 	return ret;
1529 }
1530 
1531 /**
1532  * smu_wbrf_event_handler - handle notify events
1533  *
1534  * @nb: notifier block
1535  * @action: event type
1536  * @_arg: event data
1537  *
1538  * Calls relevant amdgpu function in response to wbrf event
1539  * notification from kernel.
1540  */
1541 static int smu_wbrf_event_handler(struct notifier_block *nb,
1542 				  unsigned long action, void *_arg)
1543 {
1544 	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1545 
1546 	switch (action) {
1547 	case WBRF_CHANGED:
1548 		schedule_delayed_work(&smu->wbrf_delayed_work,
1549 				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1550 		break;
1551 	default:
1552 		return NOTIFY_DONE;
1553 	}
1554 
1555 	return NOTIFY_OK;
1556 }
1557 
1558 /**
1559  * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1560  *
1561  * @work: struct work_struct pointer
1562  *
1563  * The event flood is over; the driver consumes the latest exclusion ranges.
1564  */
1565 static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1566 {
1567 	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1568 
1569 	smu_wbrf_handle_exclusion_ranges(smu);
1570 }
1571 
1572 /**
1573  * smu_wbrf_support_check - check wbrf support
1574  *
1575  * @smu: smu_context pointer
1576  *
1577  * Checks via the ACPI interface whether wbrf is supported.
1578  */
1579 static void smu_wbrf_support_check(struct smu_context *smu)
1580 {
1581 	struct amdgpu_device *adev = smu->adev;
1582 
1583 	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1584 							acpi_amd_wbrf_supported_consumer(adev->dev);
1585 
1586 	if (smu->wbrf_supported)
1587 		dev_info(adev->dev, "RF interference mitigation is supported\n");
1588 }
1589 
1590 /**
1591  * smu_wbrf_init - init driver wbrf support
1592  *
1593  * @smu: smu_context pointer
1594  *
1595  * Verifies the AMD ACPI interfaces and registers with the wbrf
1596  * notifier chain if wbrf feature is supported.
1597  * Returns 0 on success, error on failure.
1598  */
1599 static int smu_wbrf_init(struct smu_context *smu)
1600 {
1601 	int ret;
1602 
1603 	if (!smu->wbrf_supported)
1604 		return 0;
1605 
1606 	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1607 
1608 	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1609 	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1610 	if (ret)
1611 		return ret;
1612 
1613 	/*
1614 	 * Some wifiband exclusion ranges may already exist before our
1615 	 * driver is loaded. Schedule a run to make sure our driver
1616 	 * is aware of those exclusion ranges.
1617 	 */
1618 	schedule_delayed_work(&smu->wbrf_delayed_work,
1619 			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1620 
1621 	return 0;
1622 }
1623 
1624 /**
1625  * smu_wbrf_fini - tear down driver wbrf support
1626  *
1627  * @smu: smu_context pointer
1628  *
1629  * Unregisters with the wbrf notifier chain.
1630  */
1631 static void smu_wbrf_fini(struct smu_context *smu)
1632 {
1633 	if (!smu->wbrf_supported)
1634 		return;
1635 
1636 	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1637 
1638 	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1639 }
1640 
1641 static int smu_smc_hw_setup(struct smu_context *smu)
1642 {
1643 	struct smu_feature *feature = &smu->smu_feature;
1644 	struct amdgpu_device *adev = smu->adev;
1645 	uint8_t pcie_gen = 0, pcie_width = 0;
1646 	uint64_t features_supported;
1647 	int ret = 0;
1648 
1649 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1650 	case IP_VERSION(11, 0, 7):
1651 	case IP_VERSION(11, 0, 11):
1652 	case IP_VERSION(11, 5, 0):
1653 	case IP_VERSION(11, 5, 2):
1654 	case IP_VERSION(11, 0, 12):
1655 		if (adev->in_suspend && smu_is_dpm_running(smu)) {
1656 			dev_info(adev->dev, "dpm has been enabled\n");
1657 			ret = smu_system_features_control(smu, true);
1658 			if (ret)
1659 				dev_err(adev->dev, "Failed system features control!\n");
1660 			return ret;
1661 		}
1662 		break;
1663 	default:
1664 		break;
1665 	}
1666 
1667 	ret = smu_init_display_count(smu, 0);
1668 	if (ret) {
1669 		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
1670 		return ret;
1671 	}
1672 
1673 	ret = smu_set_driver_table_location(smu);
1674 	if (ret) {
1675 		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1676 		return ret;
1677 	}
1678 
1679 	/*
1680 	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1681 	 */
1682 	ret = smu_set_tool_table_location(smu);
1683 	if (ret) {
1684 		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1685 		return ret;
1686 	}
1687 
1688 	/*
1689 	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages are
1690 	 * used to notify the SMC of the memory pool location.
1691 	 */
1692 	ret = smu_notify_memory_pool_location(smu);
1693 	if (ret) {
1694 		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1695 		return ret;
1696 	}
1697 
1698 	/*
1699 	 * It is assumed the pptable used before runpm is the same as
1700 	 * the one used afterwards. Thus, we can reuse the stored
1701 	 * copy and do not need to set up the pptable again.
1702 	 */
1703 	if (!adev->in_runpm) {
1704 		ret = smu_setup_pptable(smu);
1705 		if (ret) {
1706 			dev_err(adev->dev, "Failed to setup pptable!\n");
1707 			return ret;
1708 		}
1709 	}
1710 
1711 	/* smu_dump_pptable(smu); */
1712 
1713 	/*
1714 	 * With SCPM enabled, the PSP is responsible for transferring the
1715 	 * PPTable to the SMU. Driver involvement is neither needed nor permitted.
1716 	 */
1717 	if (!adev->scpm_enabled) {
1718 		/*
1719 		 * Copy pptable bo in the vram to smc with SMU MSGs such as
1720 		 * SetDriverDramAddr and TransferTableDram2Smu.
1721 		 */
1722 		ret = smu_write_pptable(smu);
1723 		if (ret) {
1724 			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1725 			return ret;
1726 		}
1727 	}
1728 
1729 	/* issue Run*Btc msg */
1730 	ret = smu_run_btc(smu);
1731 	if (ret)
1732 		return ret;
1733 
1734 	/* Enable UclkShadow when wbrf is supported */
1735 	if (smu->wbrf_supported) {
1736 		ret = smu_enable_uclk_shadow(smu, true);
1737 		if (ret) {
1738 			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1739 			return ret;
1740 		}
1741 	}
1742 
1743 	/*
1744 	 * With SCPM enabled, these actions (and the relevant messages) are
1745 	 * neither needed nor permitted.
1746 	 */
1747 	if (!adev->scpm_enabled) {
1748 		ret = smu_feature_set_allowed_mask(smu);
1749 		if (ret) {
1750 			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1751 			return ret;
1752 		}
1753 	}
1754 
1755 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
1756 		pcie_gen = 4;
1757 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1758 		pcie_gen = 3;
1759 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1760 		pcie_gen = 2;
1761 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1762 		pcie_gen = 1;
1763 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1764 		pcie_gen = 0;
1765 
1766 	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1767 	 * Bit 15:8:  PCIE GEN, 0 to 4 corresponds to GEN1 to GEN5
1768 	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
1769 	 */
1770 	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
1771 		pcie_width = 7;
1772 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1773 		pcie_width = 6;
1774 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1775 		pcie_width = 5;
1776 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1777 		pcie_width = 4;
1778 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1779 		pcie_width = 3;
1780 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1781 		pcie_width = 2;
1782 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1783 		pcie_width = 1;
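	/* e.g. a Gen4-capable x16 link resolves to pcie_gen = 3, pcie_width = 6 */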
1784 	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1785 	if (ret) {
1786 		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1787 		return ret;
1788 	}
1789 
1790 	ret = smu_system_features_control(smu, true);
1791 	if (ret) {
1792 		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1793 		return ret;
1794 	}
1795 
1796 	smu_init_xgmi_plpd_mode(smu);
1797 
1798 	ret = smu_feature_get_enabled_mask(smu, &features_supported);
1799 	if (ret) {
1800 		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1801 		return ret;
1802 	}
1803 	bitmap_copy(feature->supported,
1804 		    (unsigned long *)&features_supported,
1805 		    feature->feature_num);
1806 
1807 	if (!smu_is_dpm_running(smu))
1808 		dev_info(adev->dev, "dpm has been disabled\n");
1809 
1810 	/*
1811 	 * Set initial values (obtained from the vbios) in the dpm table
1812 	 * context, such as gfxclk, memclk, dcefclk, etc., and enable the
1813 	 * DPM feature for each type of clock.
1814 	 */
1815 	ret = smu_set_default_dpm_table(smu);
1816 	if (ret) {
1817 		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1818 		return ret;
1819 	}
1820 
1821 	ret = smu_get_thermal_temperature_range(smu);
1822 	if (ret) {
1823 		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1824 		return ret;
1825 	}
1826 
1827 	ret = smu_enable_thermal_alert(smu);
1828 	if (ret) {
1829 		dev_err(adev->dev, "Failed to enable thermal alert!\n");
1830 		return ret;
1831 	}
1832 
1833 	ret = smu_notify_display_change(smu);
1834 	if (ret) {
1835 		dev_err(adev->dev, "Failed to notify display change!\n");
1836 		return ret;
1837 	}
1838 
1839 	/*
1840 	 * Set min deep sleep dce fclk with bootup value from vbios via
1841 	 * SetMinDeepSleepDcefclk MSG.
1842 	 */
1843 	ret = smu_set_min_dcef_deep_sleep(smu,
1844 					  smu->smu_table.boot_values.dcefclk / 100);
1845 	if (ret) {
1846 		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1847 		return ret;
1848 	}
1849 
1850 	/* Init wbrf support and properly set up the notifier */
1851 	ret = smu_wbrf_init(smu);
1852 	if (ret)
1853 		dev_err(adev->dev, "Error during wbrf init call\n");
1854 
1855 	return ret;
1856 }
1857 
1858 static int smu_start_smc_engine(struct smu_context *smu)
1859 {
1860 	struct amdgpu_device *adev = smu->adev;
1861 	int ret = 0;
1862 
1863 	if (amdgpu_virt_xgmi_migrate_enabled(adev))
1864 		smu_update_gpu_addresses(smu);
1865 
1866 	smu->smc_fw_state = SMU_FW_INIT;
1867 
1868 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1869 		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1870 			if (smu->ppt_funcs->load_microcode) {
1871 				ret = smu->ppt_funcs->load_microcode(smu);
1872 				if (ret)
1873 					return ret;
1874 			}
1875 		}
1876 	}
1877 
1878 	if (smu->ppt_funcs->check_fw_status) {
1879 		ret = smu->ppt_funcs->check_fw_status(smu);
1880 		if (ret) {
1881 			dev_err(adev->dev, "SMC is not ready\n");
1882 			return ret;
1883 		}
1884 	}
1885 
1886 	/*
1887 	 * Send the GetDriverIfVersion message and check that the return value
1888 	 * matches the DRIVER_IF_VERSION in the SMC header.
1889 	 */
1890 	return smu_check_fw_version(smu);
1895 }
1896 
1897 static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1898 {
1899 	int i, ret;
1900 	struct amdgpu_device *adev = ip_block->adev;
1901 	struct smu_context *smu = adev->powerplay.pp_handle;
1902 
1903 	if (amdgpu_sriov_multi_vf_mode(adev)) {
1904 		smu->pm_enabled = false;
1905 		return 0;
1906 	}
1907 
1908 	ret = smu_start_smc_engine(smu);
1909 	if (ret) {
1910 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
1911 		return ret;
1912 	}
1913 
1914 	/*
1915 	 * Check whether wbrf is supported. This needs to be done
1916 	 * before SMU setup starts since part of SMU configuration
1917 	 * relies on this.
1918 	 */
1919 	smu_wbrf_support_check(smu);
1920 
1921 	if (smu->is_apu) {
1922 		ret = smu_set_gfx_imu_enable(smu);
1923 		if (ret)
1924 			return ret;
1925 		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1926 			smu_dpm_set_vcn_enable(smu, true, i);
1927 		smu_dpm_set_jpeg_enable(smu, true);
1928 		smu_dpm_set_vpe_enable(smu, true);
1929 		smu_dpm_set_umsch_mm_enable(smu, true);
1930 		smu_set_mall_enable(smu);
1931 		smu_set_gfx_cgpg(smu, true);
1932 	}
1933 
1934 	if (!smu->pm_enabled)
1935 		return 0;
1936 
1937 	ret = smu_get_driver_allowed_feature_mask(smu);
1938 	if (ret)
1939 		return ret;
1940 
1941 	ret = smu_smc_hw_setup(smu);
1942 	if (ret) {
1943 		dev_err(adev->dev, "Failed to setup smc hw!\n");
1944 		return ret;
1945 	}
1946 
1947 	/*
1948 	 * Move maximum sustainable clock retrieval here considering
1949 	 * 1. It is not needed on resume (from S3).
1950 	 * 2. DAL settings come between .hw_init and .late_init of SMU.
1951 	 *    And DAL needs to know the maximum sustainable clocks. Thus
1952 	 *    it cannot be put in .late_init().
1953 	 */
1954 	ret = smu_init_max_sustainable_clocks(smu);
1955 	if (ret) {
1956 		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1957 		return ret;
1958 	}
1959 
1960 	adev->pm.dpm_enabled = true;
1961 
1962 	dev_info(adev->dev, "SMU is initialized successfully!\n");
1963 
1964 	return 0;
1965 }
1966 
1967 static int smu_disable_dpms(struct smu_context *smu)
1968 {
1969 	struct amdgpu_device *adev = smu->adev;
1970 	int ret = 0;
1971 	bool use_baco = !smu->is_apu &&
1972 		((amdgpu_in_reset(adev) &&
1973 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1974 		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1975 
1976 	/*
1977 	 * For the IP versions listed below, PMFW will handle the DPM features
1978 	 * (disablement or others) properly on suspend/reset/unload. Driver
1979 	 * involvement may cause unexpected issues.
1979 	 */
1980 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1981 	case IP_VERSION(13, 0, 0):
1982 	case IP_VERSION(13, 0, 7):
1983 	case IP_VERSION(13, 0, 10):
1984 	case IP_VERSION(14, 0, 2):
1985 	case IP_VERSION(14, 0, 3):
1986 		return 0;
1987 	default:
1988 		break;
1989 	}
1990 
1991 	/*
1992 	 * For custom pptable uploading, skip the DPM features
1993 	 * disable process on Navi1x ASICs.
1994 	 *   - The gfx related features are under the control of
1995 	 *     the RLC on those ASICs. RLC reinitialization would be
1996 	 *     needed to reenable them, which costs considerably
1997 	 *     more effort.
1998 	 *
1999 	 *   - SMU firmware can handle the DPM reenablement
2000 	 *     properly.
2001 	 */
2002 	if (smu->uploading_custom_pp_table) {
2003 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2004 		case IP_VERSION(11, 0, 0):
2005 		case IP_VERSION(11, 0, 5):
2006 		case IP_VERSION(11, 0, 9):
2007 		case IP_VERSION(11, 0, 7):
2008 		case IP_VERSION(11, 0, 11):
2009 		case IP_VERSION(11, 5, 0):
2010 		case IP_VERSION(11, 5, 2):
2011 		case IP_VERSION(11, 0, 12):
2012 		case IP_VERSION(11, 0, 13):
2013 			return 0;
2014 		default:
2015 			break;
2016 		}
2017 	}
2018 
2019 	/*
2020 	 * For the ASICs listed below, PMFW will handle the feature disablement
2021 	 * properly on BACO entry. Driver involvement is unnecessary.
2022 	 */
2023 	if (use_baco) {
2024 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2025 		case IP_VERSION(11, 0, 7):
2026 		case IP_VERSION(11, 0, 0):
2027 		case IP_VERSION(11, 0, 5):
2028 		case IP_VERSION(11, 0, 9):
2029 		case IP_VERSION(13, 0, 7):
2030 			return 0;
2031 		default:
2032 			break;
2033 		}
2034 	}
2035 
2036 	/*
2037 	 * For GFX11 and later APUs, PMFW will handle the feature disablement
2038 	 * properly for gpu reset and S0i3 cases. Driver involvement is unnecessary.
2039 	 */
2040 	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
2041 	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
2042 		return 0;
2043 
2044 	/*
2045 	 * For gpu reset, runpm and hibernation through BACO,
2046 	 * BACO feature has to be kept enabled.
2047 	 */
2048 	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
2049 		ret = smu_disable_all_features_with_exception(smu,
2050 							      SMU_FEATURE_BACO_BIT);
2051 		if (ret)
2052 			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
2053 	} else {
2054 		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
2055 		if (!adev->scpm_enabled) {
2056 			ret = smu_system_features_control(smu, false);
2057 			if (ret)
2058 				dev_err(adev->dev, "Failed to disable smu features.\n");
2059 		}
2060 	}
2061 
2062 	/* Notify SMU that the RLC is going to be off and stop the RLC/SMU
2063 	 * interaction; otherwise SMU will hang while interacting with a halted
2064 	 * RLC. This is a workaround for the Vangogh ASIC SMU hang issue.
2065 	 */
2066 	ret = smu_notify_rlc_state(smu, false);
2067 	if (ret) {
2068 		dev_err(adev->dev, "Failed to notify RLC status!\n");
2069 		return ret;
2070 	}
2071 
2072 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
2073 	    !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
2074 	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
2075 		adev->gfx.rlc.funcs->stop(adev);
2076 
2077 	return ret;
2078 }
2079 
2080 static int smu_smc_hw_cleanup(struct smu_context *smu)
2081 {
2082 	struct amdgpu_device *adev = smu->adev;
2083 	int ret = 0;
2084 
2085 	smu_wbrf_fini(smu);
2086 
2087 	cancel_work_sync(&smu->throttling_logging_work);
2088 	cancel_work_sync(&smu->interrupt_work);
2089 
2090 	ret = smu_disable_thermal_alert(smu);
2091 	if (ret) {
2092 		dev_err(adev->dev, "Failed to disable thermal alert!\n");
2093 		return ret;
2094 	}
2095 
2096 	cancel_delayed_work_sync(&smu->swctf_delayed_work);
2097 
2098 	ret = smu_disable_dpms(smu);
2099 	if (ret) {
2100 		dev_err(adev->dev, "Failed to disable dpm features!\n");
2101 		return ret;
2102 	}
2103 
2104 	return 0;
2105 }
2106 
2107 static int smu_reset_mp1_state(struct smu_context *smu)
2108 {
2109 	struct amdgpu_device *adev = smu->adev;
2110 	int ret = 0;
2111 
2112 	if ((!adev->in_runpm) && (!adev->in_suspend) &&
2113 		(!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2114 									IP_VERSION(13, 0, 10) &&
2115 		!amdgpu_device_has_display_hardware(adev))
2116 		ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2117 
2118 	return ret;
2119 }
2120 
2121 static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2122 {
2123 	struct amdgpu_device *adev = ip_block->adev;
2124 	struct smu_context *smu = adev->powerplay.pp_handle;
2125 	int i, ret;
2126 
2127 	if (amdgpu_sriov_multi_vf_mode(adev))
2128 		return 0;
2129 
2130 	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2131 		smu_dpm_set_vcn_enable(smu, false, i);
2132 		adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
2133 	}
2134 	smu_dpm_set_jpeg_enable(smu, false);
2135 	adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2136 	smu_dpm_set_vpe_enable(smu, false);
2137 	smu_dpm_set_umsch_mm_enable(smu, false);
2138 
2139 	if (!smu->pm_enabled)
2140 		return 0;
2141 
2142 	adev->pm.dpm_enabled = false;
2143 
2144 	ret = smu_smc_hw_cleanup(smu);
2145 	if (ret)
2146 		return ret;
2147 
2148 	ret = smu_reset_mp1_state(smu);
2149 	if (ret)
2150 		return ret;
2151 
2152 	return 0;
2153 }
2154 
2155 static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2156 {
2157 	struct amdgpu_device *adev = ip_block->adev;
2158 	struct smu_context *smu = adev->powerplay.pp_handle;
2159 
2160 	kfree(smu);
2161 }
2162 
2163 static int smu_reset(struct smu_context *smu)
2164 {
2165 	struct amdgpu_device *adev = smu->adev;
2166 	struct amdgpu_ip_block *ip_block;
2167 	int ret;
2168 
2169 	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2170 	if (!ip_block)
2171 		return -EINVAL;
2172 
2173 	ret = smu_hw_fini(ip_block);
2174 	if (ret)
2175 		return ret;
2176 
2177 	ret = smu_hw_init(ip_block);
2178 	if (ret)
2179 		return ret;
2180 
2181 	ret = smu_late_init(ip_block);
2182 	if (ret)
2183 		return ret;
2184 
2185 	return 0;
2186 }
2187 
2188 static int smu_suspend(struct amdgpu_ip_block *ip_block)
2189 {
2190 	struct amdgpu_device *adev = ip_block->adev;
2191 	struct smu_context *smu = adev->powerplay.pp_handle;
2192 	int ret;
2193 	uint64_t count;
2194 
2195 	if (amdgpu_sriov_multi_vf_mode(adev))
2196 		return 0;
2197 
2198 	if (!smu->pm_enabled)
2199 		return 0;
2200 
2201 	adev->pm.dpm_enabled = false;
2202 
2203 	ret = smu_smc_hw_cleanup(smu);
2204 	if (ret)
2205 		return ret;
2206 
2207 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2208 
2209 	smu_set_gfx_cgpg(smu, false);
2210 
2211 	/*
2212 	 * PMFW resets the entrycount when the device is suspended, so save the
2213 	 * last value here to restore on resume and keep it consistent.
2214 	 */
2215 	ret = smu_get_entrycount_gfxoff(smu, &count);
2216 	if (!ret)
2217 		adev->gfx.gfx_off_entrycount = count;
2218 
2219 	/* clear this on suspend so it will get reprogrammed on resume */
2220 	smu->workload_mask = 0;
2221 
2222 	return 0;
2223 }
2224 
2225 static int smu_resume(struct amdgpu_ip_block *ip_block)
2226 {
2227 	int ret;
2228 	struct amdgpu_device *adev = ip_block->adev;
2229 	struct smu_context *smu = adev->powerplay.pp_handle;
2230 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2231 
2232 	if (amdgpu_sriov_multi_vf_mode(adev))
2233 		return 0;
2234 
2235 	if (!smu->pm_enabled)
2236 		return 0;
2237 
2238 	dev_info(adev->dev, "SMU is resuming...\n");
2239 
2240 	ret = smu_start_smc_engine(smu);
2241 	if (ret) {
2242 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
2243 		return ret;
2244 	}
2245 
2246 	ret = smu_smc_hw_setup(smu);
2247 	if (ret) {
2248 		dev_err(adev->dev, "Failed to setup smc hw!\n");
2249 		return ret;
2250 	}
2251 
2252 	ret = smu_set_gfx_imu_enable(smu);
2253 	if (ret)
2254 		return ret;
2255 
2256 	smu_set_gfx_cgpg(smu, true);
2257 
2258 	smu->disable_uclk_switch = 0;
2259 
2260 	adev->pm.dpm_enabled = true;
2261 
2262 	if (smu->current_power_limit) {
2263 		ret = smu_set_power_limit(smu, smu->current_power_limit);
2264 		if (ret && ret != -EOPNOTSUPP)
2265 			return ret;
2266 	}
2267 
2268 	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
2269 		ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
2270 		if (ret)
2271 			return ret;
2272 	}
2273 
2274 	dev_info(adev->dev, "SMU is resumed successfully!\n");
2275 
2276 	return 0;
2277 }
2278 
2279 static int smu_display_configuration_change(void *handle,
2280 					    const struct amd_pp_display_configuration *display_config)
2281 {
2282 	struct smu_context *smu = handle;
2283 
2284 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2285 		return -EOPNOTSUPP;
2286 
2287 	if (!display_config)
2288 		return -EINVAL;
2289 
2290 	smu_set_min_dcef_deep_sleep(smu,
2291 				    display_config->min_dcef_deep_sleep_set_clk / 100);
2292 
2293 	return 0;
2294 }
2295 
2296 static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2297 				     enum amd_clockgating_state state)
2298 {
2299 	return 0;
2300 }
2301 
2302 static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
2303 				     enum amd_powergating_state state)
2304 {
2305 	return 0;
2306 }
2307 
2308 static int smu_enable_umd_pstate(void *handle,
2309 		      enum amd_dpm_forced_level *level)
2310 {
2311 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2312 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2313 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2314 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2315 
2316 	struct smu_context *smu = (struct smu_context *)handle;
2317 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2318 
2319 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2320 		return -EINVAL;
2321 
2322 	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2323 		/* enter umd pstate, save current level, disable gfx cg */
2324 		if (*level & profile_mode_mask) {
2325 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2326 			smu_gpo_control(smu, false);
2327 			smu_gfx_ulv_control(smu, false);
2328 			smu_deep_sleep_control(smu, false);
2329 			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2330 		}
2331 	} else {
2332 		/* exit umd pstate, restore level, enable gfx cg */
2333 		if (!(*level & profile_mode_mask)) {
2334 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2335 				*level = smu_dpm_ctx->saved_dpm_level;
2336 			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2337 			smu_deep_sleep_control(smu, true);
2338 			smu_gfx_ulv_control(smu, true);
2339 			smu_gpo_control(smu, true);
2340 		}
2341 	}
2342 
2343 	return 0;
2344 }
2345 
2346 static int smu_bump_power_profile_mode(struct smu_context *smu,
2347 				       long *custom_params,
2348 				       u32 custom_params_max_idx)
2349 {
2350 	u32 workload_mask = 0;
2351 	int i, ret = 0;
2352 
2353 	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2354 		if (smu->workload_refcount[i])
2355 			workload_mask |= 1 << i;
2356 	}
2357 
2358 	if (smu->workload_mask == workload_mask)
2359 		return 0;
2360 
2361 	if (smu->ppt_funcs->set_power_profile_mode)
2362 		ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2363 							     custom_params,
2364 							     custom_params_max_idx);
2365 
2366 	if (!ret)
2367 		smu->workload_mask = workload_mask;
2368 
2369 	return ret;
2370 }
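
/*
 * Editor's sketch: how the per-profile refcounts above collapse into the
 * workload mask handed to the PMFW. With FULLSCREEN3D held twice and VIDEO
 * held once, the loop produces:
 *
 *	workload_mask = (1 << PP_SMC_POWER_PROFILE_FULLSCREEN3D) |
 *			(1 << PP_SMC_POWER_PROFILE_VIDEO);
 *
 * A bit stays set while at least one holder remains; the refcount depth
 * itself is never communicated to the firmware.
 */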
2371 
2372 static void smu_power_profile_mode_get(struct smu_context *smu,
2373 				       enum PP_SMC_POWER_PROFILE profile_mode)
2374 {
2375 	smu->workload_refcount[profile_mode]++;
2376 }
2377 
2378 static void smu_power_profile_mode_put(struct smu_context *smu,
2379 				       enum PP_SMC_POWER_PROFILE profile_mode)
2380 {
2381 	if (smu->workload_refcount[profile_mode])
2382 		smu->workload_refcount[profile_mode]--;
2383 }
2384 
2385 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2386 					  enum amd_dpm_forced_level level,
2387 					  bool skip_display_settings)
2388 {
2389 	int ret = 0;
2390 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2391 
2392 	if (!skip_display_settings) {
2393 		ret = smu_display_config_changed(smu);
2394 		if (ret) {
2395 			dev_err(smu->adev->dev, "Failed to change display config!\n");
2396 			return ret;
2397 		}
2398 	}
2399 
2400 	ret = smu_apply_clocks_adjust_rules(smu);
2401 	if (ret) {
2402 		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!\n");
2403 		return ret;
2404 	}
2405 
2406 	if (!skip_display_settings) {
2407 		ret = smu_notify_smc_display_config(smu);
2408 		if (ret) {
2409 			dev_err(smu->adev->dev, "Failed to notify smc display config!\n");
2410 			return ret;
2411 		}
2412 	}
2413 
2414 	if (smu_dpm_ctx->dpm_level != level) {
2415 		ret = smu_asic_set_performance_level(smu, level);
2416 		if (ret) {
2417 			if (ret == -EOPNOTSUPP)
2418 				dev_info(smu->adev->dev, "set performance level %d not supported\n",
2419 						level);
2420 			else
2421 				dev_err(smu->adev->dev, "Failed to set performance level %d\n",
2422 						level);
2423 			return ret;
2424 		}
2425 
2426 		/* update the saved copy */
2427 		smu_dpm_ctx->dpm_level = level;
2428 	}
2429 
2430 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2431 	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2432 		smu_bump_power_profile_mode(smu, NULL, 0);
2433 
2434 	return ret;
2435 }
2436 
2437 static int smu_handle_task(struct smu_context *smu,
2438 			   enum amd_dpm_forced_level level,
2439 			   enum amd_pp_task task_id)
2440 {
2441 	int ret = 0;
2442 
2443 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2444 		return -EOPNOTSUPP;
2445 
2446 	switch (task_id) {
2447 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2448 		ret = smu_pre_display_config_changed(smu);
2449 		if (ret)
2450 			return ret;
2451 		ret = smu_adjust_power_state_dynamic(smu, level, false);
2452 		break;
2453 	case AMD_PP_TASK_COMPLETE_INIT:
2454 	case AMD_PP_TASK_READJUST_POWER_STATE:
2455 		ret = smu_adjust_power_state_dynamic(smu, level, true);
2456 		break;
2459 	default:
2460 		break;
2461 	}
2462 
2463 	return ret;
2464 }
2465 
2466 static int smu_handle_dpm_task(void *handle,
2467 			       enum amd_pp_task task_id,
2468 			       enum amd_pm_state_type *user_state)
2469 {
2470 	struct smu_context *smu = handle;
2471 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2472 
2473 	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2475 }
2476 
2477 static int smu_switch_power_profile(void *handle,
2478 				    enum PP_SMC_POWER_PROFILE type,
2479 				    bool enable)
2480 {
2481 	struct smu_context *smu = handle;
2482 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2483 	int ret;
2484 
2485 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2486 		return -EOPNOTSUPP;
2487 
2488 	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
2489 		return -EINVAL;
2490 
2491 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2492 	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2493 		if (enable)
2494 			smu_power_profile_mode_get(smu, type);
2495 		else
2496 			smu_power_profile_mode_put(smu, type);
2497 		/* don't switch the active workload when paused */
2498 		if (smu->pause_workload)
2499 			ret = 0;
2500 		else
2501 			ret = smu_bump_power_profile_mode(smu, NULL, 0);
2502 		if (ret) {
2503 			if (enable)
2504 				smu_power_profile_mode_put(smu, type);
2505 			else
2506 				smu_power_profile_mode_get(smu, type);
2507 			return ret;
2508 		}
2509 	}
2510 
2511 	return 0;
2512 }
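
/*
 * Editor's note: the error path above keeps the refcounts balanced by
 * undoing the matching get/put. For enable == true the pattern is
 * effectively (sketch):
 *
 *	smu_power_profile_mode_get(smu, type);
 *	ret = smu_bump_power_profile_mode(smu, NULL, 0);
 *	if (ret)
 *		smu_power_profile_mode_put(smu, type);	// roll back the get
 */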
2513 
2514 static int smu_pause_power_profile(void *handle,
2515 				   bool pause)
2516 {
2517 	struct smu_context *smu = handle;
2518 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2519 	u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
2520 	int ret;
2521 
2522 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2523 		return -EOPNOTSUPP;
2524 
2525 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2526 	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2527 		smu->pause_workload = pause;
2528 
2529 		/* force to bootup default profile */
2530 		if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
2531 			ret = smu->ppt_funcs->set_power_profile_mode(smu,
2532 								     workload_mask,
2533 								     NULL,
2534 								     0);
2535 		else
2536 			ret = smu_bump_power_profile_mode(smu, NULL, 0);
2537 		return ret;
2538 	}
2539 
2540 	return 0;
2541 }
2542 
2543 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2544 {
2545 	struct smu_context *smu = handle;
2546 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2547 
2548 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2549 		return -EOPNOTSUPP;
2550 
2551 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2552 		return -EINVAL;
2553 
2554 	return smu_dpm_ctx->dpm_level;
2555 }
2556 
2557 static int smu_force_performance_level(void *handle,
2558 				       enum amd_dpm_forced_level level)
2559 {
2560 	struct smu_context *smu = handle;
2561 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2562 	int ret = 0;
2563 
2564 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2565 		return -EOPNOTSUPP;
2566 
2567 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2568 		return -EINVAL;
2569 
2570 	ret = smu_enable_umd_pstate(smu, &level);
2571 	if (ret)
2572 		return ret;
2573 
2574 	ret = smu_handle_task(smu, level,
2575 			      AMD_PP_TASK_READJUST_POWER_STATE);
2576 
2577 	/* reset user dpm clock state */
2578 	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2579 		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2580 		smu->user_dpm_profile.clk_dependency = 0;
2581 	}
2582 
2583 	return ret;
2584 }
2585 
2586 static int smu_set_display_count(void *handle, uint32_t count)
2587 {
2588 	struct smu_context *smu = handle;
2589 
2590 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2591 		return -EOPNOTSUPP;
2592 
2593 	return smu_init_display_count(smu, count);
2594 }
2595 
2596 static int smu_force_smuclk_levels(struct smu_context *smu,
2597 			 enum smu_clk_type clk_type,
2598 			 uint32_t mask)
2599 {
2600 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2601 	int ret = 0;
2602 
2603 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2604 		return -EOPNOTSUPP;
2605 
2606 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2607 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2608 		return -EINVAL;
2609 	}
2610 
2611 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2612 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2613 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2614 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
2615 			smu_set_user_clk_dependencies(smu, clk_type);
2616 		}
2617 	}
2618 
2619 	return ret;
2620 }
2621 
2622 static int smu_force_ppclk_levels(void *handle,
2623 				  enum pp_clock_type type,
2624 				  uint32_t mask)
2625 {
2626 	struct smu_context *smu = handle;
2627 	enum smu_clk_type clk_type;
2628 
2629 	switch (type) {
2630 	case PP_SCLK:
2631 		clk_type = SMU_SCLK; break;
2632 	case PP_MCLK:
2633 		clk_type = SMU_MCLK; break;
2634 	case PP_PCIE:
2635 		clk_type = SMU_PCIE; break;
2636 	case PP_SOCCLK:
2637 		clk_type = SMU_SOCCLK; break;
2638 	case PP_FCLK:
2639 		clk_type = SMU_FCLK; break;
2640 	case PP_DCEFCLK:
2641 		clk_type = SMU_DCEFCLK; break;
2642 	case PP_VCLK:
2643 		clk_type = SMU_VCLK; break;
2644 	case PP_VCLK1:
2645 		clk_type = SMU_VCLK1; break;
2646 	case PP_DCLK:
2647 		clk_type = SMU_DCLK; break;
2648 	case PP_DCLK1:
2649 		clk_type = SMU_DCLK1; break;
2650 	case OD_SCLK:
2651 		clk_type = SMU_OD_SCLK; break;
2652 	case OD_MCLK:
2653 		clk_type = SMU_OD_MCLK; break;
2654 	case OD_VDDC_CURVE:
2655 		clk_type = SMU_OD_VDDC_CURVE; break;
2656 	case OD_RANGE:
2657 		clk_type = SMU_OD_RANGE; break;
2658 	default:
2659 		return -EINVAL;
2660 	}
2661 
2662 	return smu_force_smuclk_levels(smu, clk_type, mask);
2663 }
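
/*
 * Editor's sketch: the mask argument is a bitmap of DPM levels to keep
 * enabled. Pinning SCLK to levels 0 and 1, for instance, would look like
 * this (assuming the manual performance level is already selected):
 *
 *	ret = smu_force_ppclk_levels(smu, PP_SCLK, BIT(0) | BIT(1));
 */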
2664 
2665 /*
2666  * On system suspend or reset, the dpm_enabled flag is cleared so that
2667  * unsupported SMU services are gated. However, the mp1 state setting
2668  * should still be granted even when dpm_enabled is cleared.
2671  */
2672 static int smu_set_mp1_state(void *handle,
2673 			     enum pp_mp1_state mp1_state)
2674 {
2675 	struct smu_context *smu = handle;
2676 	int ret = 0;
2677 
2678 	if (!smu->pm_enabled)
2679 		return -EOPNOTSUPP;
2680 
2681 	if (smu->ppt_funcs &&
2682 	    smu->ppt_funcs->set_mp1_state)
2683 		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2684 
2685 	return ret;
2686 }
2687 
2688 static int smu_set_df_cstate(void *handle,
2689 			     enum pp_df_cstate state)
2690 {
2691 	struct smu_context *smu = handle;
2692 	int ret = 0;
2693 
2694 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2695 		return -EOPNOTSUPP;
2696 
2697 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2698 		return 0;
2699 
2700 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
2701 	if (ret)
2702 		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2703 
2704 	return ret;
2705 }
2706 
2707 int smu_write_watermarks_table(struct smu_context *smu)
2708 {
2709 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2710 		return -EOPNOTSUPP;
2711 
2712 	return smu_set_watermarks_table(smu, NULL);
2713 }
2714 
2715 static int smu_set_watermarks_for_clock_ranges(void *handle,
2716 					       struct pp_smu_wm_range_sets *clock_ranges)
2717 {
2718 	struct smu_context *smu = handle;
2719 
2720 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2721 		return -EOPNOTSUPP;
2722 
2723 	if (smu->disable_watermark)
2724 		return 0;
2725 
2726 	return smu_set_watermarks_table(smu, clock_ranges);
2727 }
2728 
2729 int smu_set_ac_dc(struct smu_context *smu)
2730 {
2731 	int ret = 0;
2732 
2733 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2734 		return -EOPNOTSUPP;
2735 
2736 	/* controlled by firmware */
2737 	if (smu->dc_controlled_by_gpio)
2738 		return 0;
2739 
2740 	ret = smu_set_power_source(smu,
2741 				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2742 				   SMU_POWER_SOURCE_DC);
2743 	if (ret)
2744 		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2745 		       smu->adev->pm.ac_power ? "AC" : "DC");
2746 
2747 	return ret;
2748 }
2749 
2750 const struct amd_ip_funcs smu_ip_funcs = {
2751 	.name = "smu",
2752 	.early_init = smu_early_init,
2753 	.late_init = smu_late_init,
2754 	.sw_init = smu_sw_init,
2755 	.sw_fini = smu_sw_fini,
2756 	.hw_init = smu_hw_init,
2757 	.hw_fini = smu_hw_fini,
2758 	.late_fini = smu_late_fini,
2759 	.suspend = smu_suspend,
2760 	.resume = smu_resume,
2761 	.is_idle = NULL,
2762 	.check_soft_reset = NULL,
2763 	.wait_for_idle = NULL,
2764 	.soft_reset = NULL,
2765 	.set_clockgating_state = smu_set_clockgating_state,
2766 	.set_powergating_state = smu_set_powergating_state,
2767 };
2768 
2769 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2770 	.type = AMD_IP_BLOCK_TYPE_SMC,
2771 	.major = 11,
2772 	.minor = 0,
2773 	.rev = 0,
2774 	.funcs = &smu_ip_funcs,
2775 };
2776 
2777 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2778 	.type = AMD_IP_BLOCK_TYPE_SMC,
2779 	.major = 12,
2780 	.minor = 0,
2781 	.rev = 0,
2782 	.funcs = &smu_ip_funcs,
2783 };
2784 
2785 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2786 	.type = AMD_IP_BLOCK_TYPE_SMC,
2787 	.major = 13,
2788 	.minor = 0,
2789 	.rev = 0,
2790 	.funcs = &smu_ip_funcs,
2791 };
2792 
2793 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2794 	.type = AMD_IP_BLOCK_TYPE_SMC,
2795 	.major = 14,
2796 	.minor = 0,
2797 	.rev = 0,
2798 	.funcs = &smu_ip_funcs,
2799 };
2800 
2801 static int smu_load_microcode(void *handle)
2802 {
2803 	struct smu_context *smu = handle;
2804 	struct amdgpu_device *adev = smu->adev;
2805 	int ret = 0;
2806 
2807 	if (!smu->pm_enabled)
2808 		return -EOPNOTSUPP;
2809 
2810 	/* This should only be used for non-PSP firmware loading */
2811 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2812 		return 0;
2813 
2814 	if (smu->ppt_funcs->load_microcode) {
2815 		ret = smu->ppt_funcs->load_microcode(smu);
2816 		if (ret) {
2817 			dev_err(adev->dev, "Load microcode failed\n");
2818 			return ret;
2819 		}
2820 	}
2821 
2822 	if (smu->ppt_funcs->check_fw_status) {
2823 		ret = smu->ppt_funcs->check_fw_status(smu);
2824 		if (ret) {
2825 			dev_err(adev->dev, "SMC is not ready\n");
2826 			return ret;
2827 		}
2828 	}
2829 
2830 	return ret;
2831 }
2832 
2833 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2834 {
2835 	int ret = 0;
2836 
2837 	if (smu->ppt_funcs->set_gfx_cgpg)
2838 		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2839 
2840 	return ret;
2841 }
2842 
2843 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2844 {
2845 	struct smu_context *smu = handle;
2846 	int ret = 0;
2847 
2848 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2849 		return -EOPNOTSUPP;
2850 
2851 	if (!smu->ppt_funcs->set_fan_speed_rpm)
2852 		return -EOPNOTSUPP;
2853 
2854 	if (speed == U32_MAX)
2855 		return -EINVAL;
2856 
2857 	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2858 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2859 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2860 		smu->user_dpm_profile.fan_speed_rpm = speed;
2861 
2862 		/* Override custom PWM setting as they cannot co-exist */
2863 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2864 		smu->user_dpm_profile.fan_speed_pwm = 0;
2865 	}
2866 
2867 	return ret;
2868 }
2869 
2870 /**
2871  * smu_get_power_limit - Request one of the SMU Power Limits
2872  *
2873  * @handle: pointer to smu context
2874  * @limit: requested limit is written back to this variable
2875  * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
2876  * @pp_power_type: &pp_power_type the type of power limit
2877  * Return: 0 on success, <0 on error
2878  *
2879  */
2880 int smu_get_power_limit(void *handle,
2881 			uint32_t *limit,
2882 			enum pp_power_limit_level pp_limit_level,
2883 			enum pp_power_type pp_power_type)
2884 {
2885 	struct smu_context *smu = handle;
2886 	struct amdgpu_device *adev = smu->adev;
2887 	enum smu_ppt_limit_level limit_level;
2888 	uint32_t limit_type;
2889 	int ret = 0;
2890 
2891 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2892 		return -EOPNOTSUPP;
2893 
2894 	switch (pp_power_type) {
2895 	case PP_PWR_TYPE_SUSTAINED:
2896 		limit_type = SMU_DEFAULT_PPT_LIMIT;
2897 		break;
2898 	case PP_PWR_TYPE_FAST:
2899 		limit_type = SMU_FAST_PPT_LIMIT;
2900 		break;
2901 	default:
2902 		return -EOPNOTSUPP;
2903 	}
2904 
2905 	switch (pp_limit_level) {
2906 	case PP_PWR_LIMIT_CURRENT:
2907 		limit_level = SMU_PPT_LIMIT_CURRENT;
2908 		break;
2909 	case PP_PWR_LIMIT_DEFAULT:
2910 		limit_level = SMU_PPT_LIMIT_DEFAULT;
2911 		break;
2912 	case PP_PWR_LIMIT_MAX:
2913 		limit_level = SMU_PPT_LIMIT_MAX;
2914 		break;
2915 	case PP_PWR_LIMIT_MIN:
2916 		limit_level = SMU_PPT_LIMIT_MIN;
2917 		break;
2918 	default:
2919 		return -EOPNOTSUPP;
2920 	}
2921 
2922 	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2923 		if (smu->ppt_funcs->get_ppt_limit)
2924 			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2925 	} else {
2926 		switch (limit_level) {
2927 		case SMU_PPT_LIMIT_CURRENT:
2928 			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2929 			case IP_VERSION(13, 0, 2):
2930 			case IP_VERSION(13, 0, 6):
2931 			case IP_VERSION(13, 0, 12):
2932 			case IP_VERSION(13, 0, 14):
2933 			case IP_VERSION(11, 0, 7):
2934 			case IP_VERSION(11, 0, 11):
2935 			case IP_VERSION(11, 0, 12):
2936 			case IP_VERSION(11, 0, 13):
2937 				ret = smu_get_asic_power_limits(smu,
2938 								&smu->current_power_limit,
2939 								NULL, NULL, NULL);
2940 				break;
2941 			default:
2942 				break;
2943 			}
2944 			*limit = smu->current_power_limit;
2945 			break;
2946 		case SMU_PPT_LIMIT_DEFAULT:
2947 			*limit = smu->default_power_limit;
2948 			break;
2949 		case SMU_PPT_LIMIT_MAX:
2950 			*limit = smu->max_power_limit;
2951 			break;
2952 		case SMU_PPT_LIMIT_MIN:
2953 			*limit = smu->min_power_limit;
2954 			break;
2955 		default:
2956 			return -EINVAL;
2957 		}
2958 	}
2959 
2960 	return ret;
2961 }
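
/*
 * Editor's sketch of a typical caller: query the current sustained power
 * limit (the error handling and debug print are illustrative only):
 *
 *	uint32_t limit;
 *	int err = smu_get_power_limit(smu, &limit,
 *				      PP_PWR_LIMIT_CURRENT,
 *				      PP_PWR_TYPE_SUSTAINED);
 *	if (!err)
 *		dev_dbg(smu->adev->dev, "current ppt limit: %u\n", limit);
 */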
2962 
2963 static int smu_set_power_limit(void *handle, uint32_t limit)
2964 {
2965 	struct smu_context *smu = handle;
2966 	uint32_t limit_type = limit >> 24;
2967 	int ret = 0;
2968 
2969 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2970 		return -EOPNOTSUPP;
2971 
2972 	limit &= (1 << 24) - 1;
2973 	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2974 		if (smu->ppt_funcs->set_power_limit)
2975 			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2976 
2977 	if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2978 		dev_err(smu->adev->dev,
2979 			"New power limit (%d) is out of range [%d,%d]\n",
2980 			limit, smu->min_power_limit, smu->max_power_limit);
2981 		return -EINVAL;
2982 	}
2983 
2984 	if (!limit)
2985 		limit = smu->current_power_limit;
2986 
2987 	if (smu->ppt_funcs->set_power_limit) {
2988 		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2989 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2990 			smu->user_dpm_profile.power_limit = limit;
2991 	}
2992 
2993 	return ret;
2994 }
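
/*
 * Editor's sketch: the single u32 argument above multiplexes the PPT limit
 * type and the value, mirroring the decoding at the top of the function:
 *
 *	uint32_t limit_type = limit >> 24;		// top 8 bits
 *	uint32_t value = limit & ((1 << 24) - 1);	// lower 24 bits
 *
 * so a caller could request a fast PPT limit by passing
 * (SMU_FAST_PPT_LIMIT << 24) | value.
 */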
2995 
2996 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2997 {
2998 	int ret = 0;
2999 
3000 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3001 		return -EOPNOTSUPP;
3002 
3003 	if (smu->ppt_funcs->print_clk_levels)
3004 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
3005 
3006 	return ret;
3007 }
3008 
3009 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
3010 {
3011 	enum smu_clk_type clk_type;
3012 
3013 	switch (type) {
3014 	case PP_SCLK:
3015 		clk_type = SMU_SCLK; break;
3016 	case PP_MCLK:
3017 		clk_type = SMU_MCLK; break;
3018 	case PP_PCIE:
3019 		clk_type = SMU_PCIE; break;
3020 	case PP_SOCCLK:
3021 		clk_type = SMU_SOCCLK; break;
3022 	case PP_FCLK:
3023 		clk_type = SMU_FCLK; break;
3024 	case PP_DCEFCLK:
3025 		clk_type = SMU_DCEFCLK; break;
3026 	case PP_VCLK:
3027 		clk_type = SMU_VCLK; break;
3028 	case PP_VCLK1:
3029 		clk_type = SMU_VCLK1; break;
3030 	case PP_DCLK:
3031 		clk_type = SMU_DCLK; break;
3032 	case PP_DCLK1:
3033 		clk_type = SMU_DCLK1; break;
3034 	case PP_ISPICLK:
3035 		clk_type = SMU_ISPICLK; break;
3036 	case PP_ISPXCLK:
3037 		clk_type = SMU_ISPXCLK; break;
3040 	case OD_SCLK:
3041 		clk_type = SMU_OD_SCLK; break;
3042 	case OD_MCLK:
3043 		clk_type = SMU_OD_MCLK; break;
3044 	case OD_VDDC_CURVE:
3045 		clk_type = SMU_OD_VDDC_CURVE; break;
3046 	case OD_RANGE:
3047 		clk_type = SMU_OD_RANGE; break;
3048 	case OD_VDDGFX_OFFSET:
3049 		clk_type = SMU_OD_VDDGFX_OFFSET; break;
3050 	case OD_CCLK:
3051 		clk_type = SMU_OD_CCLK; break;
3052 	case OD_FAN_CURVE:
3053 		clk_type = SMU_OD_FAN_CURVE; break;
3054 	case OD_ACOUSTIC_LIMIT:
3055 		clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
3056 	case OD_ACOUSTIC_TARGET:
3057 		clk_type = SMU_OD_ACOUSTIC_TARGET; break;
3058 	case OD_FAN_TARGET_TEMPERATURE:
3059 		clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
3060 	case OD_FAN_MINIMUM_PWM:
3061 		clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
3062 	case OD_FAN_ZERO_RPM_ENABLE:
3063 		clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
3064 	case OD_FAN_ZERO_RPM_STOP_TEMP:
3065 		clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
3066 	default:
3067 		clk_type = SMU_CLK_COUNT; break;
3068 	}
3069 
3070 	return clk_type;
3071 }
3072 
3073 static int smu_print_ppclk_levels(void *handle,
3074 				  enum pp_clock_type type,
3075 				  char *buf)
3076 {
3077 	struct smu_context *smu = handle;
3078 	enum smu_clk_type clk_type;
3079 
3080 	clk_type = smu_convert_to_smuclk(type);
3081 	if (clk_type == SMU_CLK_COUNT)
3082 		return -EINVAL;
3083 
3084 	return smu_print_smuclk_levels(smu, clk_type, buf);
3085 }
3086 
3087 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
3088 {
3089 	struct smu_context *smu = handle;
3090 	enum smu_clk_type clk_type;
3091 
3092 	clk_type = smu_convert_to_smuclk(type);
3093 	if (clk_type == SMU_CLK_COUNT)
3094 		return -EINVAL;
3095 
3096 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3097 		return -EOPNOTSUPP;
3098 
3099 	if (!smu->ppt_funcs->emit_clk_levels)
3100 		return -ENOENT;
3101 
3102 	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
3104 }
3105 
3106 static int smu_od_edit_dpm_table(void *handle,
3107 				 enum PP_OD_DPM_TABLE_COMMAND type,
3108 				 long *input, uint32_t size)
3109 {
3110 	struct smu_context *smu = handle;
3111 	int ret = 0;
3112 
3113 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3114 		return -EOPNOTSUPP;
3115 
3116 	if (smu->ppt_funcs->od_edit_dpm_table)
3117 		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
3119 
3120 	return ret;
3121 }
3122 
3123 static int smu_read_sensor(void *handle,
3124 			   int sensor,
3125 			   void *data,
3126 			   int *size_arg)
3127 {
3128 	struct smu_context *smu = handle;
3129 	struct amdgpu_device *adev = smu->adev;
3130 	struct smu_umd_pstate_table *pstate_table =
3131 				&smu->pstate_table;
3132 	int i, ret = 0;
3133 	uint32_t *size, size_val;
3134 
3135 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3136 		return -EOPNOTSUPP;
3137 
3138 	if (!data || !size_arg)
3139 		return -EINVAL;
3140 
3141 	size_val = *size_arg;
3142 	size = &size_val;
3143 
3144 	if (smu->ppt_funcs->read_sensor)
3145 		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
3146 			goto out;
3147 
3148 	switch (sensor) {
3149 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3150 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3151 		*size = 4;
3152 		break;
3153 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3154 		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3155 		*size = 4;
3156 		break;
3157 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3158 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3159 		*size = 4;
3160 		break;
3161 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3162 		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3163 		*size = 4;
3164 		break;
3165 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3166 		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3167 		*size = 8;
3168 		break;
3169 	case AMDGPU_PP_SENSOR_UVD_POWER:
3170 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3171 		*size = 4;
3172 		break;
3173 	case AMDGPU_PP_SENSOR_VCE_POWER:
3174 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3175 		*size = 4;
3176 		break;
3177 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3178 		*(uint32_t *)data = 0;
3179 		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
3180 			if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
3181 				*(uint32_t *)data = 1;
3182 				break;
3183 			}
3184 		}
3185 		*size = 4;
3186 		break;
3187 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3188 		*(uint32_t *)data = 0;
3189 		*size = 4;
3190 		break;
3191 	default:
3192 		*size = 0;
3193 		ret = -EOPNOTSUPP;
3194 		break;
3195 	}
3196 
3197 out:
3198 	/* write the (possibly updated) uint32_t size back to the int out-arg */
3199 	*size_arg = size_val;
3200 
3201 	return ret;
3202 }
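
/*
 * Editor's sketch of a caller: the size argument is in/out, so pass the
 * buffer size in and read back how many bytes were written (illustrative):
 *
 *	uint32_t value;
 *	int size = sizeof(value);
 *
 *	if (!smu_read_sensor(smu, AMDGPU_PP_SENSOR_UVD_POWER, &value, &size))
 *		dev_dbg(smu->adev->dev, "UVD DPM %s\n",
 *			value ? "enabled" : "disabled");
 */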
3203 
3204 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
3205 {
3206 	int ret = -EOPNOTSUPP;
3207 	struct smu_context *smu = handle;
3208 
3209 	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3210 		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3211 
3212 	return ret;
3213 }
3214 
3215 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3216 {
3217 	int ret = -EOPNOTSUPP;
3218 	struct smu_context *smu = handle;
3219 
3220 	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3221 		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3222 
3223 	return ret;
3224 }
3225 
3226 static int smu_get_power_profile_mode(void *handle, char *buf)
3227 {
3228 	struct smu_context *smu = handle;
3229 
3230 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3231 	    !smu->ppt_funcs->get_power_profile_mode)
3232 		return -EOPNOTSUPP;
3233 	if (!buf)
3234 		return -EINVAL;
3235 
3236 	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3237 }
3238 
3239 static int smu_set_power_profile_mode(void *handle,
3240 				      long *param,
3241 				      uint32_t param_size)
3242 {
3243 	struct smu_context *smu = handle;
3244 	bool custom = false;
3245 	int ret = 0;
3246 
3247 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3248 	    !smu->ppt_funcs->set_power_profile_mode)
3249 		return -EOPNOTSUPP;
3250 
3251 	if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
3252 		custom = true;
3253 		/* clear frontend mask so custom changes propagate */
3254 		smu->workload_mask = 0;
3255 	}
3256 
3257 	if ((param[param_size] != smu->power_profile_mode) || custom) {
3258 		/* clear the old user preference */
3259 		smu_power_profile_mode_put(smu, smu->power_profile_mode);
3260 		/* set the new user preference */
3261 		smu_power_profile_mode_get(smu, param[param_size]);
3262 		ret = smu_bump_power_profile_mode(smu,
3263 						  custom ? param : NULL,
3264 						  custom ? param_size : 0);
3265 		if (ret)
3266 			smu_power_profile_mode_put(smu, param[param_size]);
3267 		else
3268 			/* store the user's preference */
3269 			smu->power_profile_mode = param[param_size];
3270 	}
3271 
3272 	return ret;
3273 }
3274 
3275 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3276 {
3277 	struct smu_context *smu = handle;
3278 
3279 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3280 		return -EOPNOTSUPP;
3281 
3282 	if (!smu->ppt_funcs->get_fan_control_mode)
3283 		return -EOPNOTSUPP;
3284 
3285 	if (!fan_mode)
3286 		return -EINVAL;
3287 
3288 	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3289 
3290 	return 0;
3291 }
3292 
3293 static int smu_set_fan_control_mode(void *handle, u32 value)
3294 {
3295 	struct smu_context *smu = handle;
3296 	int ret = 0;
3297 
3298 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3299 		return -EOPNOTSUPP;
3300 
3301 	if (!smu->ppt_funcs->set_fan_control_mode)
3302 		return -EOPNOTSUPP;
3303 
3304 	if (value == U32_MAX)
3305 		return -EINVAL;
3306 
3307 	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3308 	if (ret)
3309 		goto out;
3310 
3311 	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3312 		smu->user_dpm_profile.fan_mode = value;
3313 
3314 		/* reset user dpm fan speed */
3315 		if (value != AMD_FAN_CTRL_MANUAL) {
3316 			smu->user_dpm_profile.fan_speed_pwm = 0;
3317 			smu->user_dpm_profile.fan_speed_rpm = 0;
3318 			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3319 		}
3320 	}
3321 
3322 out:
3323 	return ret;
3324 }
3325 
3326 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3327 {
3328 	struct smu_context *smu = handle;
3329 	int ret = 0;
3330 
3331 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3332 		return -EOPNOTSUPP;
3333 
3334 	if (!smu->ppt_funcs->get_fan_speed_pwm)
3335 		return -EOPNOTSUPP;
3336 
3337 	if (!speed)
3338 		return -EINVAL;
3339 
3340 	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3341 
3342 	return ret;
3343 }
3344 
3345 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3346 {
3347 	struct smu_context *smu = handle;
3348 	int ret = 0;
3349 
3350 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3351 		return -EOPNOTSUPP;
3352 
3353 	if (!smu->ppt_funcs->set_fan_speed_pwm)
3354 		return -EOPNOTSUPP;
3355 
3356 	if (speed == U32_MAX)
3357 		return -EINVAL;
3358 
3359 	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3360 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3361 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3362 		smu->user_dpm_profile.fan_speed_pwm = speed;
3363 
3364 		/* Override custom RPM setting as they cannot co-exist */
3365 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3366 		smu->user_dpm_profile.fan_speed_rpm = 0;
3367 	}
3368 
3369 	return ret;
3370 }
3371 
3372 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3373 {
3374 	struct smu_context *smu = handle;
3375 	int ret = 0;
3376 
3377 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3378 		return -EOPNOTSUPP;
3379 
3380 	if (!smu->ppt_funcs->get_fan_speed_rpm)
3381 		return -EOPNOTSUPP;
3382 
3383 	if (!speed)
3384 		return -EINVAL;
3385 
3386 	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3387 
3388 	return ret;
3389 }
3390 
3391 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3392 {
3393 	struct smu_context *smu = handle;
3394 
3395 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3396 		return -EOPNOTSUPP;
3397 
3398 	return smu_set_min_dcef_deep_sleep(smu, clk);
3399 }
3400 
3401 static int smu_get_clock_by_type_with_latency(void *handle,
3402 					      enum amd_pp_clock_type type,
3403 					      struct pp_clock_levels_with_latency *clocks)
3404 {
3405 	struct smu_context *smu = handle;
3406 	enum smu_clk_type clk_type;
3407 	int ret = 0;
3408 
3409 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3410 		return -EOPNOTSUPP;
3411 
3412 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3413 		switch (type) {
3414 		case amd_pp_sys_clock:
3415 			clk_type = SMU_GFXCLK;
3416 			break;
3417 		case amd_pp_mem_clock:
3418 			clk_type = SMU_MCLK;
3419 			break;
3420 		case amd_pp_dcef_clock:
3421 			clk_type = SMU_DCEFCLK;
3422 			break;
3423 		case amd_pp_disp_clock:
3424 			clk_type = SMU_DISPCLK;
3425 			break;
3426 		default:
3427 			dev_err(smu->adev->dev, "Invalid clock type!\n");
3428 			return -EINVAL;
3429 		}
3430 
3431 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3432 	}
3433 
3434 	return ret;
3435 }
3436 
3437 static int smu_display_clock_voltage_request(void *handle,
3438 					     struct pp_display_clock_request *clock_req)
3439 {
3440 	struct smu_context *smu = handle;
3441 	int ret = 0;
3442 
3443 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3444 		return -EOPNOTSUPP;
3445 
3446 	if (smu->ppt_funcs->display_clock_voltage_request)
3447 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3448 
3449 	return ret;
3450 }
3451 
3453 static int smu_display_disable_memory_clock_switch(void *handle,
3454 						   bool disable_memory_clock_switch)
3455 {
3456 	struct smu_context *smu = handle;
3457 	int ret = -EINVAL;
3458 
3459 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3460 		return -EOPNOTSUPP;
3461 
3462 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
3463 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3464 
3465 	return ret;
3466 }
3467 
3468 static int smu_set_xgmi_pstate(void *handle,
3469 			       uint32_t pstate)
3470 {
3471 	struct smu_context *smu = handle;
3472 	int ret = 0;
3473 
3474 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3475 		return -EOPNOTSUPP;
3476 
3477 	if (smu->ppt_funcs->set_xgmi_pstate)
3478 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3479 
3480 	if (ret)
3481 		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3482 
3483 	return ret;
3484 }
3485 
3486 static int smu_get_baco_capability(void *handle)
3487 {
3488 	struct smu_context *smu = handle;
3489 
3490 	if (!smu->pm_enabled)
3491 		return false;
3492 
3493 	if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3494 		return false;
3495 
3496 	return smu->ppt_funcs->get_bamaco_support(smu);
3497 }
3498 
3499 static int smu_baco_set_state(void *handle, int state)
3500 {
3501 	struct smu_context *smu = handle;
3502 	int ret = 0;
3503 
3504 	if (!smu->pm_enabled)
3505 		return -EOPNOTSUPP;
3506 
3507 	if (state == 0) {
3508 		if (smu->ppt_funcs->baco_exit)
3509 			ret = smu->ppt_funcs->baco_exit(smu);
3510 	} else if (state == 1) {
3511 		if (smu->ppt_funcs->baco_enter)
3512 			ret = smu->ppt_funcs->baco_enter(smu);
3513 	} else {
3514 		return -EINVAL;
3515 	}
3516 
3517 	if (ret)
3518 		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3519 				state ? "enter" : "exit");
3520 
3521 	return ret;
3522 }
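
/*
 * Editor's note: the state argument uses 1 for BACO entry and 0 for exit,
 * e.g. (illustrative):
 *
 *	smu_baco_set_state(smu, 1);	// enter BACO
 *	smu_baco_set_state(smu, 0);	// exit BACO
 */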
3523 
3524 bool smu_mode1_reset_is_support(struct smu_context *smu)
3525 {
3526 	bool ret = false;
3527 
3528 	if (!smu->pm_enabled)
3529 		return false;
3530 
3531 	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3532 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3533 
3534 	return ret;
3535 }
3536 
3537 bool smu_link_reset_is_support(struct smu_context *smu)
3538 {
3539 	bool ret = false;
3540 
3541 	if (!smu->pm_enabled)
3542 		return false;
3543 
3544 	if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
3545 		ret = smu->ppt_funcs->link_reset_is_support(smu);
3546 
3547 	return ret;
3548 }
3549 
3550 int smu_mode1_reset(struct smu_context *smu)
3551 {
3552 	int ret = 0;
3553 
3554 	if (!smu->pm_enabled)
3555 		return -EOPNOTSUPP;
3556 
3557 	if (smu->ppt_funcs->mode1_reset)
3558 		ret = smu->ppt_funcs->mode1_reset(smu);
3559 
3560 	return ret;
3561 }
3562 
3563 static int smu_mode2_reset(void *handle)
3564 {
3565 	struct smu_context *smu = handle;
3566 	int ret = 0;
3567 
3568 	if (!smu->pm_enabled)
3569 		return -EOPNOTSUPP;
3570 
3571 	if (smu->ppt_funcs->mode2_reset)
3572 		ret = smu->ppt_funcs->mode2_reset(smu);
3573 
3574 	if (ret)
3575 		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3576 
3577 	return ret;
3578 }
3579 
3580 int smu_link_reset(struct smu_context *smu)
3581 {
3582 	int ret = 0;
3583 
3584 	if (!smu->pm_enabled)
3585 		return -EOPNOTSUPP;
3586 
3587 	if (smu->ppt_funcs->link_reset)
3588 		ret = smu->ppt_funcs->link_reset(smu);
3589 
3590 	return ret;
3591 }
3592 
3593 static int smu_enable_gfx_features(void *handle)
3594 {
3595 	struct smu_context *smu = handle;
3596 	int ret = 0;
3597 
3598 	if (!smu->pm_enabled)
3599 		return -EOPNOTSUPP;
3600 
3601 	if (smu->ppt_funcs->enable_gfx_features)
3602 		ret = smu->ppt_funcs->enable_gfx_features(smu);
3603 
3604 	if (ret)
3605 		dev_err(smu->adev->dev, "enable gfx features failed!\n");
3606 
3607 	return ret;
3608 }
3609 
3610 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3611 						struct pp_smu_nv_clock_table *max_clocks)
3612 {
3613 	struct smu_context *smu = handle;
3614 	int ret = 0;
3615 
3616 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3617 		return -EOPNOTSUPP;
3618 
3619 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3620 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3621 
3622 	return ret;
3623 }
3624 
3625 static int smu_get_uclk_dpm_states(void *handle,
3626 				   unsigned int *clock_values_in_khz,
3627 				   unsigned int *num_states)
3628 {
3629 	struct smu_context *smu = handle;
3630 	int ret = 0;
3631 
3632 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3633 		return -EOPNOTSUPP;
3634 
3635 	if (smu->ppt_funcs->get_uclk_dpm_states)
3636 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3637 
3638 	return ret;
3639 }
3640 
3641 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3642 {
3643 	struct smu_context *smu = handle;
3644 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3645 
3646 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3647 		return -EOPNOTSUPP;
3648 
3649 	if (smu->ppt_funcs->get_current_power_state)
3650 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
3651 
3652 	return pm_state;
3653 }
3654 
3655 static int smu_get_dpm_clock_table(void *handle,
3656 				   struct dpm_clocks *clock_table)
3657 {
3658 	struct smu_context *smu = handle;
3659 	int ret = 0;
3660 
3661 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3662 		return -EOPNOTSUPP;
3663 
3664 	if (smu->ppt_funcs->get_dpm_clock_table)
3665 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3666 
3667 	return ret;
3668 }
3669 
3670 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3671 {
3672 	struct smu_context *smu = handle;
3673 
3674 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3675 		return -EOPNOTSUPP;
3676 
3677 	if (!smu->ppt_funcs->get_gpu_metrics)
3678 		return -EOPNOTSUPP;
3679 
3680 	return smu->ppt_funcs->get_gpu_metrics(smu, table);
3681 }
3682 
3683 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3684 				      size_t size)
3685 {
3686 	struct smu_context *smu = handle;
3687 
3688 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3689 		return -EOPNOTSUPP;
3690 
3691 	if (!smu->ppt_funcs->get_pm_metrics)
3692 		return -EOPNOTSUPP;
3693 
3694 	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3695 }
3696 
3697 static int smu_enable_mgpu_fan_boost(void *handle)
3698 {
3699 	struct smu_context *smu = handle;
3700 	int ret = 0;
3701 
3702 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3703 		return -EOPNOTSUPP;
3704 
3705 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
3706 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3707 
3708 	return ret;
3709 }
3710 
3711 static int smu_gfx_state_change_set(void *handle,
3712 				    uint32_t state)
3713 {
3714 	struct smu_context *smu = handle;
3715 	int ret = 0;
3716 
3717 	if (smu->ppt_funcs->gfx_state_change_set)
3718 		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3719 
3720 	return ret;
3721 }
3722 
3723 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3724 {
3725 	int ret = 0;
3726 
3727 	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3728 		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3729 
3730 	return ret;
3731 }
3732 
3733 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3734 {
3735 	int ret = -EOPNOTSUPP;
3736 
3737 	if (smu->ppt_funcs &&
3738 		smu->ppt_funcs->get_ecc_info)
3739 		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3740 
3741 	return ret;
3743 }
3744 
3745 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3746 {
3747 	struct smu_context *smu = handle;
3748 	struct smu_table_context *smu_table = &smu->smu_table;
3749 	struct smu_table *memory_pool = &smu_table->memory_pool;
3750 
3751 	if (!addr || !size)
3752 		return -EINVAL;
3753 
3754 	*addr = NULL;
3755 	*size = 0;
3756 	if (memory_pool->bo) {
3757 		*addr = memory_pool->cpu_addr;
3758 		*size = memory_pool->size;
3759 	}
3760 
3761 	return 0;
3762 }
3763 
3764 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
3765 				 size_t *size)
3766 {
3767 	size_t offset = *size;
3768 	int level;
3769 
3770 	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
3771 		if (level == policy->current_level)
3772 			offset += sysfs_emit_at(sysbuf, offset,
3773 				"%d : %s*\n", level,
3774 				policy->desc->get_desc(policy, level));
3775 		else
3776 			offset += sysfs_emit_at(sysbuf, offset,
3777 				"%d : %s\n", level,
3778 				policy->desc->get_desc(policy, level));
3779 	}
3780 
3781 	*size = offset;
3782 }
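
/*
 * Editor's note: the emitted sysfs text marks the active level with a
 * trailing asterisk, e.g. (the level descriptions are illustrative):
 *
 *	0 : policy_level_0
 *	1 : policy_level_1*
 *	2 : policy_level_2
 */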
3783 
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
			       enum pp_pm_policy p_type, char *sysbuf)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *dpm_policy;
	size_t offset = 0;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return -EOPNOTSUPP;

	if (p_type == PP_PM_POLICY_NONE)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);
	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
		return -ENOENT;

	if (!sysbuf)
		return -EINVAL;

	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);

	return offset;
}

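/**
 * smu_get_pm_policy - Look up a DPM policy by type
 * @smu: smu_context pointer
 * @p_type: PM policy type to look up
 *
 * Returns a pointer to the matching policy, or NULL if no policy context
 * exists or no policy of the given type is registered.
 */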
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
					 enum pp_pm_policy p_type)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int i;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!policy_ctxt)
		return NULL;

	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
		if (policy_ctxt->policies[i].policy_type == p_type)
			return &policy_ctxt->policies[i];
	}

	return NULL;
}

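/**
 * smu_set_pm_policy - Set the level of a PM policy
 * @smu: smu_context pointer
 * @p_type: PM policy type to modify
 * @level: level to set
 *
 * Calls the policy's set_policy handler and caches the new level on
 * success. Setting the already-active level is a no-op.
 *
 * Returns 0 on success, or a negative error code on failure.
 */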
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
		      int level)
{
	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
	struct smu_dpm_policy *dpm_policy = NULL;
	struct smu_dpm_policy_ctxt *policy_ctxt;
	int ret = -EOPNOTSUPP;

	policy_ctxt = dpm_ctxt->dpm_policies;
	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
	    !policy_ctxt->policy_mask)
		return ret;

	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
		return -EINVAL;

	dpm_policy = smu_get_pm_policy(smu, p_type);

	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
		return ret;

	if (dpm_policy->current_level == level)
		return 0;

	ret = dpm_policy->set_policy(smu, level);

	if (!ret)
		dpm_policy->current_level = level;

	return ret;
}

static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	enum smu_table_id table_id;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
		return -EOPNOTSUPP;

	table_id = smu_metrics_get_temp_table_id(type);

	if (table_id == SMU_TABLE_COUNT)
		return -EINVAL;

	/* If the request is to get size alone, return the cached table size */
	if (!table && tables[table_id].cache.size)
		return tables[table_id].cache.size;

	if (smu_table_cache_is_valid(&tables[table_id])) {
		memcpy(table, tables[table_id].cache.buffer,
		       tables[table_id].cache.size);
		return tables[table_id].cache.size;
	}

	return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
}

static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
{
	struct smu_context *smu = handle;
	bool ret = false;

	if (!smu->pm_enabled)
		return false;

	if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
		ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);

	return ret;
}

static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
}

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode    = smu_set_fan_control_mode,
	.get_fan_control_mode    = smu_get_fan_control_mode,
	.set_fan_speed_pwm       = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm       = smu_get_fan_speed_pwm,
	.force_clock_level       = smu_force_ppclk_levels,
	.print_clock_levels      = smu_print_ppclk_levels,
	.emit_clock_levels       = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor             = smu_read_sensor,
	.get_apu_thermal_limit   = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit   = smu_set_apu_thermal_limit,
	.get_performance_level   = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
	.get_pp_num_states       = smu_get_power_num_states,
	.get_pp_table            = smu_sys_get_pp_table,
	.set_pp_table            = smu_sys_set_pp_table,
	.switch_power_profile    = smu_switch_power_profile,
	.pause_power_profile     = smu_pause_power_profile,
	/* export to amdgpu */
	.dispatch_tasks          = smu_handle_dpm_task,
	.load_firmware           = smu_load_microcode,
	.set_powergating_by_smu  = smu_dpm_set_power_gate,
	.set_power_limit         = smu_set_power_limit,
	.get_power_limit         = smu_get_power_limit,
	.get_power_profile_mode  = smu_get_power_profile_mode,
	.set_power_profile_mode  = smu_set_power_profile_mode,
	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
	.set_mp1_state           = smu_set_mp1_state,
	.gfx_state_change_set    = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk                            = smu_get_sclk,
	.get_mclk                            = smu_get_mclk,
	.display_configuration_change        = smu_display_configuration_change,
	.get_clock_by_type_with_latency      = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request       = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost               = smu_enable_mgpu_fan_boost,
	.set_active_display_count            = smu_set_display_count,
	.set_min_deep_sleep_dcefclk          = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability            = smu_get_baco_capability,
	.set_asic_baco_state                 = smu_baco_set_state,
	.get_ppfeature_status                = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status                = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2                   = smu_mode2_reset,
	.asic_reset_enable_gfx_features      = smu_enable_gfx_features,
	.set_df_cstate                       = smu_set_df_cstate,
	.set_xgmi_pstate                     = smu_set_xgmi_pstate,
	.get_gpu_metrics                     = smu_sys_get_gpu_metrics,
	.get_pm_metrics                      = smu_sys_get_pm_metrics,
	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states                 = smu_get_uclk_dpm_states,
	.get_dpm_clock_table                 = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details             = smu_get_prv_buffer_details,
	.get_xcp_metrics                     = smu_sys_get_xcp_metrics,
	.get_temp_metrics                    = smu_sys_get_temp_metrics,
	.temp_metrics_is_supported           = smu_temp_metrics_is_supported,
};

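/**
 * smu_wait_for_event - Wait for an SMU event to occur
 * @smu: smu_context pointer
 * @event: event to wait for
 * @event_arg: event-specific argument
 *
 * Returns 0 on success, -EINVAL if the ASIC does not implement the
 * wait_for_event callback, or a negative error code on failure.
 */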
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

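/**
 * smu_stb_collect_info - Copy the STB (Smart Trace Buffer) contents into @buf
 * @smu: smu_context pointer
 * @buf: destination buffer
 * @size: size of @buf, which must match the STB buffer size
 *
 * Returns the result of the ASIC-specific stb_collect_info callback on
 * success, -EOPNOTSUPP if STB is disabled or not implemented, or -EINVAL
 * if @size does not match the STB buffer size.
 */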
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * No need to lock the smu mutex, as we access the STB directly
	 * through MMIO rather than through the SMU messaging route (for
	 * now at least). For register access, rely on the implementation's
	 * internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos,
				       filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}

/*
 * We have to define not only a read method but also open and release
 * methods, because .read returns at most PAGE_SIZE of data per call and
 * is therefore invoked multiple times. We allocate the STB buffer in
 * .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

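/**
 * amdgpu_smu_stb_debug_fs_init - Create the amdgpu_smu_stb_dump debugfs file
 * @adev: amdgpu_device pointer
 *
 * Registers a read-only debugfs entry for dumping the STB contents. Does
 * nothing when debugfs is disabled or no STB buffer is present.
 */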
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || !smu->stb_context.stb_buf_size)
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

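/**
 * smu_send_hbm_bad_pages_num - Report the number of bad HBM pages to the SMU
 * @smu: smu_context pointer
 * @size: number of bad pages
 *
 * Returns 0 on success (or when the callback is not implemented), or a
 * negative error code on failure.
 */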
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

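/**
 * smu_send_hbm_bad_channel_flag - Report bad HBM channel information to the SMU
 * @smu: smu_context pointer
 * @size: bad channel information to report
 *
 * Returns 0 on success (or when the callback is not implemented), or a
 * negative error code on failure.
 */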
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}

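/**
 * smu_send_rma_reason - Forward the RMA reason to the SMU
 * @smu: smu_context pointer
 *
 * Returns 0 on success (or when the callback is not implemented), or a
 * negative error code on failure.
 */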
int smu_send_rma_reason(struct smu_context *smu)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
		ret = smu->ppt_funcs->send_rma_reason(smu);

	return ret;
}

/**
 * smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
 * @smu: smu_context pointer
 *
 * This function checks if the SMU supports resetting the SDMA engine.
 * It returns true if supported, false otherwise.
 */
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
	bool ret = false;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
		ret = smu->ppt_funcs->reset_sdma_is_supported(smu);

	return ret;
}

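/**
 * smu_reset_sdma - Ask the SMU to reset the SDMA engine(s)
 * @smu: smu_context pointer
 * @inst_mask: mask of SDMA instances to reset
 *
 * Returns 0 on success (or when the callback is not implemented), or a
 * negative error code on failure.
 */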
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
		ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);

	return ret;
}

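/**
 * smu_reset_vcn_is_supported - Check if VCN reset is supported by SMU
 * @smu: smu_context pointer
 *
 * This function checks if the SMU supports resetting the VCN engine.
 * It returns true if supported, false otherwise.
 */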
bool smu_reset_vcn_is_supported(struct smu_context *smu)
{
	bool ret = false;

	if (smu->ppt_funcs && smu->ppt_funcs->reset_vcn_is_supported)
		ret = smu->ppt_funcs->reset_vcn_is_supported(smu);

	return ret;
}

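/**
 * smu_reset_vcn - Ask the SMU to reset the VCN engine(s)
 * @smu: smu_context pointer
 * @inst_mask: mask of VCN instances to reset
 *
 * Returns 0 unconditionally; the status of the underlying dpm_reset_vcn
 * callback is not propagated.
 */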
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
	if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
		smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);

	return 0;
}