xref: /linux/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c (revision 96f30c8f0aa9923aa39b30bcaefeacf88b490231)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #define SWSMU_CODE_LAYER_L1
24 
25 #include <linux/firmware.h>
26 #include <linux/pci.h>
27 #include <linux/power_supply.h>
28 #include <linux/reboot.h>
29 
30 #include "amdgpu.h"
31 #include "amdgpu_smu.h"
32 #include "smu_internal.h"
33 #include "atom.h"
34 #include "arcturus_ppt.h"
35 #include "navi10_ppt.h"
36 #include "sienna_cichlid_ppt.h"
37 #include "renoir_ppt.h"
38 #include "vangogh_ppt.h"
39 #include "aldebaran_ppt.h"
40 #include "yellow_carp_ppt.h"
41 #include "cyan_skillfish_ppt.h"
42 #include "smu_v13_0_0_ppt.h"
43 #include "smu_v13_0_4_ppt.h"
44 #include "smu_v13_0_5_ppt.h"
45 #include "smu_v13_0_6_ppt.h"
46 #include "smu_v13_0_7_ppt.h"
47 #include "smu_v14_0_0_ppt.h"
48 #include "smu_v14_0_2_ppt.h"
49 #include "amd_pcie.h"
50 
51 /*
52  * DO NOT use these for err/warn/info/debug messages.
53  * Use dev_err, dev_warn, dev_info and dev_dbg instead.
54  * They are more MGPU friendly.
55  */
56 #undef pr_err
57 #undef pr_warn
58 #undef pr_info
59 #undef pr_debug
60 
61 static const struct amd_pm_funcs swsmu_pm_funcs;
62 static int smu_force_smuclk_levels(struct smu_context *smu,
63 				   enum smu_clk_type clk_type,
64 				   uint32_t mask);
65 static int smu_handle_task(struct smu_context *smu,
66 			   enum amd_dpm_forced_level level,
67 			   enum amd_pp_task task_id);
68 static int smu_reset(struct smu_context *smu);
69 static int smu_set_fan_speed_pwm(void *handle, u32 speed);
70 static int smu_set_fan_control_mode(void *handle, u32 value);
71 static int smu_set_power_limit(void *handle, uint32_t limit);
72 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
73 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
74 static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
75 
76 static int smu_sys_get_pp_feature_mask(void *handle,
77 				       char *buf)
78 {
79 	struct smu_context *smu = handle;
80 
81 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
82 		return -EOPNOTSUPP;
83 
84 	return smu_get_pp_feature_mask(smu, buf);
85 }
86 
87 static int smu_sys_set_pp_feature_mask(void *handle,
88 				       uint64_t new_mask)
89 {
90 	struct smu_context *smu = handle;
91 
92 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
93 		return -EOPNOTSUPP;
94 
95 	return smu_set_pp_feature_mask(smu, new_mask);
96 }
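
/*
 * Example (illustrative only, not part of the driver): the two handlers
 * above back the pp_features sysfs node. Assuming card0, userspace can
 * read the current feature mask or request a new one, e.g.:
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fff > /sys/class/drm/card0/device/pp_features
 */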
97 
98 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
99 {
100 	if (!smu->ppt_funcs->set_gfx_off_residency)
101 		return -EINVAL;
102 
103 	return smu_set_gfx_off_residency(smu, value);
104 }
105 
106 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
107 {
108 	if (!smu->ppt_funcs->get_gfx_off_residency)
109 		return -EINVAL;
110 
111 	return smu_get_gfx_off_residency(smu, value);
112 }
113 
114 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
115 {
116 	if (!smu->ppt_funcs->get_gfx_off_entrycount)
117 		return -EINVAL;
118 
119 	return smu_get_gfx_off_entrycount(smu, value);
120 }
121 
122 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
123 {
124 	if (!smu->ppt_funcs->get_gfx_off_status)
125 		return -EINVAL;
126 
127 	*value = smu_get_gfx_off_status(smu);
128 
129 	return 0;
130 }
131 
132 int smu_set_soft_freq_range(struct smu_context *smu,
133 			    enum smu_clk_type clk_type,
134 			    uint32_t min,
135 			    uint32_t max)
136 {
137 	int ret = 0;
138 
139 	if (smu->ppt_funcs->set_soft_freq_limited_range)
140 		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
141 								  clk_type,
142 								  min,
143 								  max);
144 
145 	return ret;
146 }
147 
148 int smu_get_dpm_freq_range(struct smu_context *smu,
149 			   enum smu_clk_type clk_type,
150 			   uint32_t *min,
151 			   uint32_t *max)
152 {
153 	int ret = -ENOTSUPP;
154 
155 	if (!min && !max)
156 		return -EINVAL;
157 
158 	if (smu->ppt_funcs->get_dpm_ultimate_freq)
159 		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
160 							    clk_type,
161 							    min,
162 							    max);
163 
164 	return ret;
165 }
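
/*
 * Example (illustrative sketch, not part of the driver): a caller could
 * combine the two helpers above to clamp a clock domain to its supported
 * window. The 1500 MHz cap is a hypothetical value.
 *
 *	uint32_t min_mhz, max_mhz;
 *
 *	if (!smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_mhz, &max_mhz))
 *		smu_set_soft_freq_range(smu, SMU_GFXCLK, min_mhz,
 *					min(max_mhz, 1500U));
 */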
166 
167 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
168 {
169 	int ret = 0;
170 	struct amdgpu_device *adev = smu->adev;
171 
172 	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
173 		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
174 		if (ret)
175 			dev_err(adev->dev, "Failed to enable gfx imu!\n");
176 	}
177 	return ret;
178 }
179 
180 static u32 smu_get_mclk(void *handle, bool low)
181 {
182 	struct smu_context *smu = handle;
183 	uint32_t clk_freq;
184 	int ret = 0;
185 
186 	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
187 				     low ? &clk_freq : NULL,
188 				     !low ? &clk_freq : NULL);
189 	if (ret)
190 		return 0;
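	/*
	 * clk_freq is in MHz; multiplying by 100 yields the 10 kHz units
	 * used by the legacy dpm interfaces.
	 */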
191 	return clk_freq * 100;
192 }
193 
194 static u32 smu_get_sclk(void *handle, bool low)
195 {
196 	struct smu_context *smu = handle;
197 	uint32_t clk_freq;
198 	int ret = 0;
199 
200 	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
201 				     low ? &clk_freq : NULL,
202 				     !low ? &clk_freq : NULL);
203 	if (ret)
204 		return 0;
205 	return clk_freq * 100;
206 }
207 
208 static int smu_set_gfx_imu_enable(struct smu_context *smu)
209 {
210 	struct amdgpu_device *adev = smu->adev;
211 
212 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
213 		return 0;
214 
215 	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
216 		return 0;
217 
218 	return smu_set_gfx_power_up_by_imu(smu);
219 }
220 
221 static bool is_vcn_enabled(struct amdgpu_device *adev)
222 {
223 	int i;
224 
225 	for (i = 0; i < adev->num_ip_blocks; i++) {
226 		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
227 			adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
228 			!adev->ip_blocks[i].status.valid)
229 			return false;
230 	}
231 
232 	return true;
233 }
234 
235 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
236 				  bool enable)
237 {
238 	struct smu_power_context *smu_power = &smu->smu_power;
239 	struct smu_power_gate *power_gate = &smu_power->power_gate;
240 	int ret = 0;
241 
242 	/*
243 	 * Don't power on VCN/JPEG when they are skipped.
244 	 */
245 	if (!is_vcn_enabled(smu->adev))
246 		return 0;
247 
248 	if (!smu->ppt_funcs->dpm_set_vcn_enable)
249 		return 0;
250 
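	/*
	 * vcn_gated is the inverse of "enabled": gated != enable means the
	 * block is already in the requested state, so there is no transition
	 * to perform. The same pattern guards the JPEG/VPE/UMSCH paths below.
	 */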
251 	if (atomic_read(&power_gate->vcn_gated) ^ enable)
252 		return 0;
253 
254 	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
255 	if (!ret)
256 		atomic_set(&power_gate->vcn_gated, !enable);
257 
258 	return ret;
259 }
260 
261 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
262 				   bool enable)
263 {
264 	struct smu_power_context *smu_power = &smu->smu_power;
265 	struct smu_power_gate *power_gate = &smu_power->power_gate;
266 	int ret = 0;
267 
268 	if (!is_vcn_enabled(smu->adev))
269 		return 0;
270 
271 	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
272 		return 0;
273 
274 	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
275 		return 0;
276 
277 	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
278 	if (!ret)
279 		atomic_set(&power_gate->jpeg_gated, !enable);
280 
281 	return ret;
282 }
283 
284 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
285 				   bool enable)
286 {
287 	struct smu_power_context *smu_power = &smu->smu_power;
288 	struct smu_power_gate *power_gate = &smu_power->power_gate;
289 	int ret = 0;
290 
291 	if (!smu->ppt_funcs->dpm_set_vpe_enable)
292 		return 0;
293 
294 	if (atomic_read(&power_gate->vpe_gated) ^ enable)
295 		return 0;
296 
297 	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
298 	if (!ret)
299 		atomic_set(&power_gate->vpe_gated, !enable);
300 
301 	return ret;
302 }
303 
304 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
305 				   bool enable)
306 {
307 	struct smu_power_context *smu_power = &smu->smu_power;
308 	struct smu_power_gate *power_gate = &smu_power->power_gate;
309 	int ret = 0;
310 
311 	if (!smu->adev->enable_umsch_mm)
312 		return 0;
313 
314 	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
315 		return 0;
316 
317 	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
318 		return 0;
319 
320 	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
321 	if (!ret)
322 		atomic_set(&power_gate->umsch_mm_gated, !enable);
323 
324 	return ret;
325 }
326 
327 static int smu_set_mall_enable(struct smu_context *smu)
328 {
329 	int ret = 0;
330 
331 	if (!smu->ppt_funcs->set_mall_enable)
332 		return 0;
333 
334 	ret = smu->ppt_funcs->set_mall_enable(smu);
335 
336 	return ret;
337 }
338 
339 /**
340  * smu_dpm_set_power_gate - power gate/ungate the specific IP block
341  *
342  * @handle:        smu_context pointer
343  * @block_type: the IP block to power gate/ungate
344  * @gate:       to power gate if true, ungate otherwise
345  *
346  * This API needs no smu->mutex lock protection because:
347  * 1. It is called by other IP blocks (gfx/sdma/vcn/uvd/vce), and the
348  *    caller guarantees freedom from race conditions.
349  * 2. Or it is called on a user request to set power_dpm_force_performance_level.
350  *    In that case, the smu->mutex lock protection is already enforced in
351  *    the parent API smu_force_performance_level of the call path.
352  */
353 static int smu_dpm_set_power_gate(void *handle,
354 				  uint32_t block_type,
355 				  bool gate)
356 {
357 	struct smu_context *smu = handle;
358 	int ret = 0;
359 
360 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
361 		dev_WARN(smu->adev->dev,
362 			 "SMU uninitialized but power %s requested for %u!\n",
363 			 gate ? "gate" : "ungate", block_type);
364 		return -EOPNOTSUPP;
365 	}
366 
367 	switch (block_type) {
368 	/*
369 	 * Some legacy code in amdgpu_vcn.c and vcn_v2*.c still uses
370 	 * AMD_IP_BLOCK_TYPE_UVD for VCN, so both types are handled here.
371 	 */
372 	case AMD_IP_BLOCK_TYPE_UVD:
373 	case AMD_IP_BLOCK_TYPE_VCN:
374 		ret = smu_dpm_set_vcn_enable(smu, !gate);
375 		if (ret)
376 			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
377 				gate ? "gate" : "ungate");
378 		break;
379 	case AMD_IP_BLOCK_TYPE_GFX:
380 		ret = smu_gfx_off_control(smu, gate);
381 		if (ret)
382 			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
383 				gate ? "enable" : "disable");
384 		break;
385 	case AMD_IP_BLOCK_TYPE_SDMA:
386 		ret = smu_powergate_sdma(smu, gate);
387 		if (ret)
388 			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
389 				gate ? "gate" : "ungate");
390 		break;
391 	case AMD_IP_BLOCK_TYPE_JPEG:
392 		ret = smu_dpm_set_jpeg_enable(smu, !gate);
393 		if (ret)
394 			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
395 				gate ? "gate" : "ungate");
396 		break;
397 	case AMD_IP_BLOCK_TYPE_VPE:
398 		ret = smu_dpm_set_vpe_enable(smu, !gate);
399 		if (ret)
400 			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
401 				gate ? "gate" : "ungate");
402 		break;
403 	default:
404 		dev_err(smu->adev->dev, "Unsupported block type!\n");
405 		return -EINVAL;
406 	}
407 
408 	return ret;
409 }
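
/*
 * Example (illustrative, not part of the driver): an IP block such as VCN
 * typically reaches this handler through the amdgpu_dpm layer, e.g. to
 * ungate VCN before decode work begins:
 *
 *	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCN, false);
 */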
410 
411 /**
412  * smu_set_user_clk_dependencies - set user profile clock dependencies
413  *
414  * @smu:	smu_context pointer
415  * @clk:	the clock type (enum smu_clk_type)
416  *
417  * Enable/Disable the clock dependency for the @clk type.
418  */
419 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
420 {
421 	if (smu->adev->in_suspend)
422 		return;
423 
424 	if (clk == SMU_MCLK) {
425 		smu->user_dpm_profile.clk_dependency = 0;
426 		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
427 	} else if (clk == SMU_FCLK) {
428 		/* MCLK takes precedence over FCLK */
429 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
430 			return;
431 
432 		smu->user_dpm_profile.clk_dependency = 0;
433 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
434 	} else if (clk == SMU_SOCCLK) {
435 		/* MCLK takes precedence over SOCCLK */
436 		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
437 			return;
438 
439 		smu->user_dpm_profile.clk_dependency = 0;
440 		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
441 	} else
442 		/* Add clk dependencies here, if any */
443 		return;
444 }
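
/*
 * Example (illustrative): after a user forces MCLK levels, the dependency
 * mask records that FCLK and SOCCLK follow MCLK, so a later
 * smu_restore_dpm_user_profile() skips any clock whose bit is set in
 * clk_dependency:
 *
 *	smu_set_user_clk_dependencies(smu, SMU_MCLK);
 *	(clk_dependency is now BIT(SMU_FCLK) | BIT(SMU_SOCCLK))
 */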
445 
446 /**
447  * smu_restore_dpm_user_profile - reinstate user dpm profile
448  *
449  * @smu:	smu_context pointer
450  *
451  * Restore the saved user power configurations, including power limit,
452  * clock frequencies, fan control mode and fan speed.
453  */
454 static void smu_restore_dpm_user_profile(struct smu_context *smu)
455 {
456 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
457 	int ret = 0;
458 
459 	if (!smu->adev->in_suspend)
460 		return;
461 
462 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
463 		return;
464 
465 	/* Enable restore flag */
466 	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
467 
468 	/* set the user dpm power limit */
469 	if (smu->user_dpm_profile.power_limit) {
470 		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
471 		if (ret)
472 			dev_err(smu->adev->dev, "Failed to set power limit value\n");
473 	}
474 
475 	/* set the user dpm clock configurations */
476 	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
477 		enum smu_clk_type clk_type;
478 
479 		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
480 			/*
481 			 * Iterate over smu clk type and force the saved user clk
482 			 * configs, skip if clock dependency is enabled
483 			 */
484 			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
485 					smu->user_dpm_profile.clk_mask[clk_type]) {
486 				ret = smu_force_smuclk_levels(smu, clk_type,
487 						smu->user_dpm_profile.clk_mask[clk_type]);
488 				if (ret)
489 					dev_err(smu->adev->dev,
490 						"Failed to set clock type = %d\n", clk_type);
491 			}
492 		}
493 	}
494 
495 	/* set the user dpm fan configurations */
496 	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
497 	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
498 		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
499 		if (ret && ret != -EOPNOTSUPP) {
500 			smu->user_dpm_profile.fan_speed_pwm = 0;
501 			smu->user_dpm_profile.fan_speed_rpm = 0;
502 			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
503 			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
504 		}
505 
506 		if (smu->user_dpm_profile.fan_speed_pwm) {
507 			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
508 			if (ret && ret != -EOPNOTSUPP)
509 				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
510 		}
511 
512 		if (smu->user_dpm_profile.fan_speed_rpm) {
513 			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
514 			if (ret && ret != -EOPNOTSUPP)
515 				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
516 		}
517 	}
518 
519 	/* Restore user customized OD settings */
520 	if (smu->user_dpm_profile.user_od) {
521 		if (smu->ppt_funcs->restore_user_od_settings) {
522 			ret = smu->ppt_funcs->restore_user_od_settings(smu);
523 			if (ret)
524 				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
525 		}
526 	}
527 
528 	/* Disable restore flag */
529 	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
530 }
531 
532 static int smu_get_power_num_states(void *handle,
533 				    struct pp_states_info *state_info)
534 {
535 	if (!state_info)
536 		return -EINVAL;
537 
538 	/* power states are not supported */
539 	memset(state_info, 0, sizeof(struct pp_states_info));
540 	state_info->nums = 1;
541 	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
542 
543 	return 0;
544 }
545 
546 bool is_support_sw_smu(struct amdgpu_device *adev)
547 {
548 	/* vega20 is 11.0.2, but it's supported via the powerplay code */
549 	if (adev->asic_type == CHIP_VEGA20)
550 		return false;
551 
552 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0))
553 		return true;
554 
555 	return false;
556 }
557 
558 bool is_support_cclk_dpm(struct amdgpu_device *adev)
559 {
560 	struct smu_context *smu = adev->powerplay.pp_handle;
561 
562 	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
563 		return false;
564 
565 	return true;
566 }
567 
568 
569 static int smu_sys_get_pp_table(void *handle,
570 				char **table)
571 {
572 	struct smu_context *smu = handle;
573 	struct smu_table_context *smu_table = &smu->smu_table;
574 
575 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
576 		return -EOPNOTSUPP;
577 
578 	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
579 		return -EINVAL;
580 
581 	if (smu_table->hardcode_pptable)
582 		*table = smu_table->hardcode_pptable;
583 	else
584 		*table = smu_table->power_play_table;
585 
586 	return smu_table->power_play_table_size;
587 }
588 
589 static int smu_sys_set_pp_table(void *handle,
590 				const char *buf,
591 				size_t size)
592 {
593 	struct smu_context *smu = handle;
594 	struct smu_table_context *smu_table = &smu->smu_table;
595 	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
596 	int ret = 0;
597 
598 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
599 		return -EOPNOTSUPP;
600 
601 	if (header->usStructureSize != size) {
602 		dev_err(smu->adev->dev, "pp table size not matched!\n");
603 		return -EIO;
604 	}
605 
606 	if (!smu_table->hardcode_pptable) {
607 		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
608 		if (!smu_table->hardcode_pptable)
609 			return -ENOMEM;
610 	}
611 
612 	memcpy(smu_table->hardcode_pptable, buf, size);
613 	smu_table->power_play_table = smu_table->hardcode_pptable;
614 	smu_table->power_play_table_size = size;
615 
616 	/*
617 	 * A special hw_fini action (for Navi1x, the DPM disablement will be
618 	 * skipped) may be needed for custom pptable uploading.
619 	 */
620 	smu->uploading_custom_pp_table = true;
621 
622 	ret = smu_reset(smu);
623 	if (ret)
624 		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
625 
626 	smu->uploading_custom_pp_table = false;
627 
628 	return ret;
629 }
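
/*
 * Example (illustrative): this handler is reached when userspace writes a
 * raw soft-pptable blob to the pp_table sysfs node, e.g. (assuming card0):
 *
 *	cat custom_pptable.bin > /sys/class/drm/card0/device/pp_table
 */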
630 
631 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
632 {
633 	struct smu_feature *feature = &smu->smu_feature;
634 	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
635 	int ret = 0;
636 
637 	/*
638 	 * With SCPM enabled, setting the allowed feature masks (via
639 	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
640 	 * That means there is no way to let PMFW know the settings below.
641 	 * Thus, we just assume all the features are allowed in
642 	 * such a scenario.
643 	 */
644 	if (smu->adev->scpm_enabled) {
645 		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
646 		return 0;
647 	}
648 
649 	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
650 
651 	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
652 					     SMU_FEATURE_MAX/32);
653 	if (ret)
654 		return ret;
655 
656 	bitmap_or(feature->allowed, feature->allowed,
657 		      (unsigned long *)allowed_feature_mask,
658 		      feature->feature_num);
659 
660 	return ret;
661 }
662 
663 static int smu_set_funcs(struct amdgpu_device *adev)
664 {
665 	struct smu_context *smu = adev->powerplay.pp_handle;
666 
667 	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
668 		smu->od_enabled = true;
669 
670 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
671 	case IP_VERSION(11, 0, 0):
672 	case IP_VERSION(11, 0, 5):
673 	case IP_VERSION(11, 0, 9):
674 		navi10_set_ppt_funcs(smu);
675 		break;
676 	case IP_VERSION(11, 0, 7):
677 	case IP_VERSION(11, 0, 11):
678 	case IP_VERSION(11, 0, 12):
679 	case IP_VERSION(11, 0, 13):
680 		sienna_cichlid_set_ppt_funcs(smu);
681 		break;
682 	case IP_VERSION(12, 0, 0):
683 	case IP_VERSION(12, 0, 1):
684 		renoir_set_ppt_funcs(smu);
685 		break;
686 	case IP_VERSION(11, 5, 0):
687 		vangogh_set_ppt_funcs(smu);
688 		break;
689 	case IP_VERSION(13, 0, 1):
690 	case IP_VERSION(13, 0, 3):
691 	case IP_VERSION(13, 0, 8):
692 		yellow_carp_set_ppt_funcs(smu);
693 		break;
694 	case IP_VERSION(13, 0, 4):
695 	case IP_VERSION(13, 0, 11):
696 		smu_v13_0_4_set_ppt_funcs(smu);
697 		break;
698 	case IP_VERSION(13, 0, 5):
699 		smu_v13_0_5_set_ppt_funcs(smu);
700 		break;
701 	case IP_VERSION(11, 0, 8):
702 		cyan_skillfish_set_ppt_funcs(smu);
703 		break;
704 	case IP_VERSION(11, 0, 2):
705 		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
706 		arcturus_set_ppt_funcs(smu);
707 		/* OD is not supported on Arcturus */
708 		smu->od_enabled = false;
709 		break;
710 	case IP_VERSION(13, 0, 2):
711 		aldebaran_set_ppt_funcs(smu);
712 		/* Enable pp_od_clk_voltage node */
713 		smu->od_enabled = true;
714 		break;
715 	case IP_VERSION(13, 0, 0):
716 	case IP_VERSION(13, 0, 10):
717 		smu_v13_0_0_set_ppt_funcs(smu);
718 		break;
719 	case IP_VERSION(13, 0, 6):
720 	case IP_VERSION(13, 0, 14):
721 		smu_v13_0_6_set_ppt_funcs(smu);
722 		/* Enable pp_od_clk_voltage node */
723 		smu->od_enabled = true;
724 		break;
725 	case IP_VERSION(13, 0, 7):
726 		smu_v13_0_7_set_ppt_funcs(smu);
727 		break;
728 	case IP_VERSION(14, 0, 0):
729 	case IP_VERSION(14, 0, 1):
730 	case IP_VERSION(14, 0, 4):
731 		smu_v14_0_0_set_ppt_funcs(smu);
732 		break;
733 	case IP_VERSION(14, 0, 2):
734 	case IP_VERSION(14, 0, 3):
735 		smu_v14_0_2_set_ppt_funcs(smu);
736 		break;
737 	default:
738 		return -EINVAL;
739 	}
740 
741 	return 0;
742 }
743 
744 static int smu_early_init(void *handle)
745 {
746 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
747 	struct smu_context *smu;
748 	int r;
749 
750 	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
751 	if (!smu)
752 		return -ENOMEM;
753 
754 	smu->adev = adev;
755 	smu->pm_enabled = !!amdgpu_dpm;
756 	smu->is_apu = false;
757 	smu->smu_baco.state = SMU_BACO_STATE_NONE;
758 	smu->smu_baco.platform_support = false;
759 	smu->smu_baco.maco_support = false;
760 	smu->user_dpm_profile.fan_mode = -1;
761 
762 	mutex_init(&smu->message_lock);
763 
764 	adev->powerplay.pp_handle = smu;
765 	adev->powerplay.pp_funcs = &swsmu_pm_funcs;
766 
767 	r = smu_set_funcs(adev);
768 	if (r)
769 		return r;
770 	return smu_init_microcode(smu);
771 }
772 
773 static int smu_set_default_dpm_table(struct smu_context *smu)
774 {
775 	struct amdgpu_device *adev = smu->adev;
776 	struct smu_power_context *smu_power = &smu->smu_power;
777 	struct smu_power_gate *power_gate = &smu_power->power_gate;
778 	int vcn_gate, jpeg_gate;
779 	int ret = 0;
780 
781 	if (!smu->ppt_funcs->set_default_dpm_table)
782 		return 0;
783 
784 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
785 		vcn_gate = atomic_read(&power_gate->vcn_gated);
786 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
787 		jpeg_gate = atomic_read(&power_gate->jpeg_gated);
788 
789 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
790 		ret = smu_dpm_set_vcn_enable(smu, true);
791 		if (ret)
792 			return ret;
793 	}
794 
795 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
796 		ret = smu_dpm_set_jpeg_enable(smu, true);
797 		if (ret)
798 			goto err_out;
799 	}
800 
801 	ret = smu->ppt_funcs->set_default_dpm_table(smu);
802 	if (ret)
803 		dev_err(smu->adev->dev,
804 			"Failed to setup default dpm clock tables!\n");
805 
806 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
807 		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
808 err_out:
809 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
810 		smu_dpm_set_vcn_enable(smu, !vcn_gate);
811 
812 	return ret;
813 }
814 
815 static int smu_apply_default_config_table_settings(struct smu_context *smu)
816 {
817 	struct amdgpu_device *adev = smu->adev;
818 	int ret = 0;
819 
820 	ret = smu_get_default_config_table_settings(smu,
821 						    &adev->pm.config_table);
822 	if (ret)
823 		return ret;
824 
825 	return smu_set_config_table(smu, &adev->pm.config_table);
826 }
827 
828 static int smu_late_init(void *handle)
829 {
830 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
831 	struct smu_context *smu = adev->powerplay.pp_handle;
832 	int ret = 0;
833 
834 	smu_set_fine_grain_gfx_freq_parameters(smu);
835 
836 	if (!smu->pm_enabled)
837 		return 0;
838 
839 	ret = smu_post_init(smu);
840 	if (ret) {
841 		dev_err(adev->dev, "Failed to post smu init!\n");
842 		return ret;
843 	}
844 
845 	/*
846 	 * Explicitly notify PMFW of the power mode the system is in,
847 	 * since PMFW may boot the ASIC in a different mode.
848 	 * For ASICs supporting AC/DC switch via gpio, PMFW will
849 	 * handle the switch automatically. Driver involvement
850 	 * is unnecessary.
851 	 */
852 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
853 	smu_set_ac_dc(smu);
854 
855 	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
856 	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
857 		return 0;
858 
859 	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
860 		ret = smu_set_default_od_settings(smu);
861 		if (ret) {
862 			dev_err(adev->dev, "Failed to setup default OD settings!\n");
863 			return ret;
864 		}
865 	}
866 
867 	ret = smu_populate_umd_state_clk(smu);
868 	if (ret) {
869 		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
870 		return ret;
871 	}
872 
873 	ret = smu_get_asic_power_limits(smu,
874 					&smu->current_power_limit,
875 					&smu->default_power_limit,
876 					&smu->max_power_limit,
877 					&smu->min_power_limit);
878 	if (ret) {
879 		dev_err(adev->dev, "Failed to get asic power limits!\n");
880 		return ret;
881 	}
882 
883 	if (!amdgpu_sriov_vf(adev))
884 		smu_get_unique_id(smu);
885 
886 	smu_get_fan_parameters(smu);
887 
888 	smu_handle_task(smu,
889 			smu->smu_dpm.dpm_level,
890 			AMD_PP_TASK_COMPLETE_INIT);
891 
892 	ret = smu_apply_default_config_table_settings(smu);
893 	if (ret && (ret != -EOPNOTSUPP)) {
894 		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
895 		return ret;
896 	}
897 
898 	smu_restore_dpm_user_profile(smu);
899 
900 	return 0;
901 }
902 
903 static int smu_init_fb_allocations(struct smu_context *smu)
904 {
905 	struct amdgpu_device *adev = smu->adev;
906 	struct smu_table_context *smu_table = &smu->smu_table;
907 	struct smu_table *tables = smu_table->tables;
908 	struct smu_table *driver_table = &(smu_table->driver_table);
909 	uint32_t max_table_size = 0;
910 	int ret, i;
911 
912 	/* VRAM allocation for tool table */
913 	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
914 		ret = amdgpu_bo_create_kernel(adev,
915 					      tables[SMU_TABLE_PMSTATUSLOG].size,
916 					      tables[SMU_TABLE_PMSTATUSLOG].align,
917 					      tables[SMU_TABLE_PMSTATUSLOG].domain,
918 					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
919 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
920 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
921 		if (ret) {
922 			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
923 			return ret;
924 		}
925 	}
926 
927 	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
928 	/* VRAM allocation for driver table */
929 	for (i = 0; i < SMU_TABLE_COUNT; i++) {
930 		if (tables[i].size == 0)
931 			continue;
932 
933 		/* If one of the tables has VRAM domain restriction, keep it in
934 		 * VRAM
935 		 */
936 		if ((tables[i].domain &
937 		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
938 			    AMDGPU_GEM_DOMAIN_VRAM)
939 			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
940 
941 		if (i == SMU_TABLE_PMSTATUSLOG)
942 			continue;
943 
944 		if (max_table_size < tables[i].size)
945 			max_table_size = tables[i].size;
946 	}
947 
948 	driver_table->size = max_table_size;
949 	driver_table->align = PAGE_SIZE;
950 
951 	ret = amdgpu_bo_create_kernel(adev,
952 				      driver_table->size,
953 				      driver_table->align,
954 				      driver_table->domain,
955 				      &driver_table->bo,
956 				      &driver_table->mc_address,
957 				      &driver_table->cpu_addr);
958 	if (ret) {
959 		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
960 		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
961 			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
962 					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
963 					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
964 	}
965 
966 	return ret;
967 }
968 
969 static int smu_fini_fb_allocations(struct smu_context *smu)
970 {
971 	struct smu_table_context *smu_table = &smu->smu_table;
972 	struct smu_table *tables = smu_table->tables;
973 	struct smu_table *driver_table = &(smu_table->driver_table);
974 
975 	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
976 		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
977 				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
978 				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
979 
980 	amdgpu_bo_free_kernel(&driver_table->bo,
981 			      &driver_table->mc_address,
982 			      &driver_table->cpu_addr);
983 
984 	return 0;
985 }
986 
987 /**
988  * smu_alloc_memory_pool - allocate memory pool in the system memory
989  *
990  * @smu: smu_context pointer
991  *
992  * This memory pool is allocated for SMC use; the SetSystemVirtualDramAddr
993  * and DramLogSetDramAddr messages notify the SMC of its location.
994  *
995  * Returns 0 on success, error on failure.
996  */
997 static int smu_alloc_memory_pool(struct smu_context *smu)
998 {
999 	struct amdgpu_device *adev = smu->adev;
1000 	struct smu_table_context *smu_table = &smu->smu_table;
1001 	struct smu_table *memory_pool = &smu_table->memory_pool;
1002 	uint64_t pool_size = smu->pool_size;
1003 	int ret = 0;
1004 
1005 	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
1006 		return ret;
1007 
1008 	memory_pool->size = pool_size;
1009 	memory_pool->align = PAGE_SIZE;
1010 	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
1011 
1012 	switch (pool_size) {
1013 	case SMU_MEMORY_POOL_SIZE_256_MB:
1014 	case SMU_MEMORY_POOL_SIZE_512_MB:
1015 	case SMU_MEMORY_POOL_SIZE_1_GB:
1016 	case SMU_MEMORY_POOL_SIZE_2_GB:
1017 		ret = amdgpu_bo_create_kernel(adev,
1018 					      memory_pool->size,
1019 					      memory_pool->align,
1020 					      memory_pool->domain,
1021 					      &memory_pool->bo,
1022 					      &memory_pool->mc_address,
1023 					      &memory_pool->cpu_addr);
1024 		if (ret)
1025 			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
1026 		break;
1027 	default:
1028 		break;
1029 	}
1030 
1031 	return ret;
1032 }
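
/*
 * Example (illustrative, value hypothetical): the pool size above is
 * derived from the amdgpu.smu_memory_pool_size module parameter, e.g. to
 * request a 1 GB pool:
 *
 *	modprobe amdgpu smu_memory_pool_size=4
 */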
1033 
1034 static int smu_free_memory_pool(struct smu_context *smu)
1035 {
1036 	struct smu_table_context *smu_table = &smu->smu_table;
1037 	struct smu_table *memory_pool = &smu_table->memory_pool;
1038 
1039 	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
1040 		return 0;
1041 
1042 	amdgpu_bo_free_kernel(&memory_pool->bo,
1043 			      &memory_pool->mc_address,
1044 			      &memory_pool->cpu_addr);
1045 
1046 	memset(memory_pool, 0, sizeof(struct smu_table));
1047 
1048 	return 0;
1049 }
1050 
1051 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1052 {
1053 	struct smu_table_context *smu_table = &smu->smu_table;
1054 	struct smu_table *dummy_read_1_table =
1055 			&smu_table->dummy_read_1_table;
1056 	struct amdgpu_device *adev = smu->adev;
1057 	int ret = 0;
1058 
1059 	if (!dummy_read_1_table->size)
1060 		return 0;
1061 
1062 	ret = amdgpu_bo_create_kernel(adev,
1063 				      dummy_read_1_table->size,
1064 				      dummy_read_1_table->align,
1065 				      dummy_read_1_table->domain,
1066 				      &dummy_read_1_table->bo,
1067 				      &dummy_read_1_table->mc_address,
1068 				      &dummy_read_1_table->cpu_addr);
1069 	if (ret)
1070 		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");
1071 
1072 	return ret;
1073 }
1074 
1075 static void smu_free_dummy_read_table(struct smu_context *smu)
1076 {
1077 	struct smu_table_context *smu_table = &smu->smu_table;
1078 	struct smu_table *dummy_read_1_table =
1079 			&smu_table->dummy_read_1_table;
1080 
1081 
1082 	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
1083 			      &dummy_read_1_table->mc_address,
1084 			      &dummy_read_1_table->cpu_addr);
1085 
1086 	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
1087 }
1088 
1089 static int smu_smc_table_sw_init(struct smu_context *smu)
1090 {
1091 	int ret;
1092 
1093 	/**
1094 	 * Create smu_table structure, and init smc tables such as
1095 	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
1096 	 */
1097 	ret = smu_init_smc_tables(smu);
1098 	if (ret) {
1099 		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1100 		return ret;
1101 	}
1102 
1103 	/**
1104 	 * Create the smu_power_context structure, and allocate the
1105 	 * smu_dpm_context and other buffers to fill the smu_power_context data.
1106 	 */
1107 	ret = smu_init_power(smu);
1108 	if (ret) {
1109 		dev_err(smu->adev->dev, "Failed to init smu power!\n");
1110 		return ret;
1111 	}
1112 
1113 	/*
1114 	 * allocate vram bos to store smc table contents.
1115 	 */
1116 	ret = smu_init_fb_allocations(smu);
1117 	if (ret)
1118 		return ret;
1119 
1120 	ret = smu_alloc_memory_pool(smu);
1121 	if (ret)
1122 		return ret;
1123 
1124 	ret = smu_alloc_dummy_read_table(smu);
1125 	if (ret)
1126 		return ret;
1127 
1128 	ret = smu_i2c_init(smu);
1129 	if (ret)
1130 		return ret;
1131 
1132 	return 0;
1133 }
1134 
1135 static int smu_smc_table_sw_fini(struct smu_context *smu)
1136 {
1137 	int ret;
1138 
1139 	smu_i2c_fini(smu);
1140 
1141 	smu_free_dummy_read_table(smu);
1142 
1143 	ret = smu_free_memory_pool(smu);
1144 	if (ret)
1145 		return ret;
1146 
1147 	ret = smu_fini_fb_allocations(smu);
1148 	if (ret)
1149 		return ret;
1150 
1151 	ret = smu_fini_power(smu);
1152 	if (ret) {
1153 		dev_err(smu->adev->dev, "Failed to fini smu power!\n");
1154 		return ret;
1155 	}
1156 
1157 	ret = smu_fini_smc_tables(smu);
1158 	if (ret) {
1159 		dev_err(smu->adev->dev, "Failed to fini smc tables!\n");
1160 		return ret;
1161 	}
1162 
1163 	return 0;
1164 }
1165 
1166 static void smu_throttling_logging_work_fn(struct work_struct *work)
1167 {
1168 	struct smu_context *smu = container_of(work, struct smu_context,
1169 					       throttling_logging_work);
1170 
1171 	smu_log_thermal_throttling(smu);
1172 }
1173 
1174 static void smu_interrupt_work_fn(struct work_struct *work)
1175 {
1176 	struct smu_context *smu = container_of(work, struct smu_context,
1177 					       interrupt_work);
1178 
1179 	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1180 		smu->ppt_funcs->interrupt_work(smu);
1181 }
1182 
1183 static void smu_swctf_delayed_work_handler(struct work_struct *work)
1184 {
1185 	struct smu_context *smu =
1186 		container_of(work, struct smu_context, swctf_delayed_work.work);
1187 	struct smu_temperature_range *range =
1188 				&smu->thermal_range;
1189 	struct amdgpu_device *adev = smu->adev;
1190 	uint32_t hotspot_tmp, size;
1191 
1192 	/*
1193 	 * If the hotspot temperature is confirmed to be below the SW CTF set point
1194 	 * after the enforced delay, nothing will be done.
1195 	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
1196 	 */
1197 	if (range->software_shutdown_temp &&
1198 	    smu->ppt_funcs->read_sensor &&
1199 	    !smu->ppt_funcs->read_sensor(smu,
1200 					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
1201 					 &hotspot_tmp,
1202 					 &size) &&
1203 	    hotspot_tmp / 1000 < range->software_shutdown_temp)
1204 		return;
1205 
1206 	dev_emerg(adev->dev, "ERROR: GPU over temperature range (SW CTF) detected!\n");
1207 	dev_emerg(adev->dev, "ERROR: System is going to shut down due to GPU SW CTF!\n");
1208 	orderly_poweroff(true);
1209 }
1210 
1211 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1212 {
1213 	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
1214 	struct smu_dpm_policy_ctxt *policy_ctxt;
1215 	struct smu_dpm_policy *policy;
1216 
1217 	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
1218 	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1219 		if (policy)
1220 			policy->current_level = XGMI_PLPD_DEFAULT;
1221 		return;
1222 	}
1223 
1224 	/* PMFW put PLPD into default policy after enabling the feature */
1225 	if (smu_feature_is_enabled(smu,
1226 				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
1227 		if (policy)
1228 			policy->current_level = XGMI_PLPD_DEFAULT;
1229 	} else {
1230 		policy_ctxt = dpm_ctxt->dpm_policies;
1231 		if (policy_ctxt)
1232 			policy_ctxt->policy_mask &=
1233 				~BIT(PP_PM_POLICY_XGMI_PLPD);
1234 	}
1235 }
1236 
1237 static int smu_sw_init(void *handle)
1238 {
1239 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1240 	struct smu_context *smu = adev->powerplay.pp_handle;
1241 	int ret;
1242 
1243 	smu->pool_size = adev->pm.smu_prv_buffer_size;
1244 	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1245 	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1246 	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1247 
1248 	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1249 	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1250 	atomic64_set(&smu->throttle_int_counter, 0);
1251 	smu->watermarks_bitmap = 0;
1252 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1253 	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1254 
1255 	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
1256 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1257 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1258 	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1259 
1260 	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
1261 	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
1262 	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
1263 	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
1264 	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
1265 	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
1266 	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
1267 	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1268 
1269 	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1270 	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1271 	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1272 	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1273 	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1274 	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1275 	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1276 	smu->display_config = &adev->pm.pm_display_cfg;
1277 
1278 	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1279 	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1280 
1281 	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1282 			  smu_swctf_delayed_work_handler);
1283 
1284 	ret = smu_smc_table_sw_init(smu);
1285 	if (ret) {
1286 		dev_err(adev->dev, "Failed to sw init smc table!\n");
1287 		return ret;
1288 	}
1289 
1290 	/* get boot_values from vbios to set revision, gfxclk, etc. */
1291 	ret = smu_get_vbios_bootup_values(smu);
1292 	if (ret) {
1293 		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
1294 		return ret;
1295 	}
1296 
1297 	ret = smu_init_pptable_microcode(smu);
1298 	if (ret) {
1299 		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
1300 		return ret;
1301 	}
1302 
1303 	ret = smu_register_irq_handler(smu);
1304 	if (ret) {
1305 		dev_err(adev->dev, "Failed to register smc irq handler!\n");
1306 		return ret;
1307 	}
1308 
1309 	/* If there is no way to query fan control mode, fan control is not supported */
1310 	if (!smu->ppt_funcs->get_fan_control_mode)
1311 		smu->adev->pm.no_fan = true;
1312 
1313 	return 0;
1314 }
1315 
1316 static int smu_sw_fini(void *handle)
1317 {
1318 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1319 	struct smu_context *smu = adev->powerplay.pp_handle;
1320 	int ret;
1321 
1322 	ret = smu_smc_table_sw_fini(smu);
1323 	if (ret) {
1324 		dev_err(adev->dev, "Failed to sw fini smc table!\n");
1325 		return ret;
1326 	}
1327 
1328 	smu_fini_microcode(smu);
1329 
1330 	return 0;
1331 }
1332 
1333 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1334 {
1335 	struct amdgpu_device *adev = smu->adev;
1336 	struct smu_temperature_range *range =
1337 				&smu->thermal_range;
1338 	int ret = 0;
1339 
1340 	if (!smu->ppt_funcs->get_thermal_temperature_range)
1341 		return 0;
1342 
1343 	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1344 	if (ret)
1345 		return ret;
1346 
1347 	adev->pm.dpm.thermal.min_temp = range->min;
1348 	adev->pm.dpm.thermal.max_temp = range->max;
1349 	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
1350 	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
1351 	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
1352 	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
1353 	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
1354 	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
1355 	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;
1356 
1357 	return ret;
1358 }
1359 
1360 /**
1361  * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
1362  *
1363  * @smu: smu_context pointer
1364  *
1365  * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
1366  * Returns 0 on success, error on failure.
1367  */
1368 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1369 {
1370 	struct wbrf_ranges_in_out wbrf_exclusion = {0};
1371 	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
1372 	struct amdgpu_device *adev = smu->adev;
1373 	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
1374 	uint64_t start, end;
1375 	int ret, i, j;
1376 
1377 	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
1378 	if (ret) {
1379 		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
1380 		return ret;
1381 	}
1382 
1383 	/*
1384 	 * The exclusion ranges array we got might be filled with holes and duplicate
1385 	 * entries. For example:
1386 	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
1387 	 * We need to sort and compact the array to eliminate those holes and duplicate entries.
1388 	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
1389 	 */
1390 	for (i = 0; i < num_of_wbrf_ranges; i++) {
1391 		start = wifi_bands[i].start;
1392 		end = wifi_bands[i].end;
1393 
1394 		/* get the last valid entry to fill the intermediate hole */
1395 		if (!start && !end) {
1396 			for (j = num_of_wbrf_ranges - 1; j > i; j--)
1397 				if (wifi_bands[j].start && wifi_bands[j].end)
1398 					break;
1399 
1400 			/* no valid entry left */
1401 			if (j <= i)
1402 				break;
1403 
1404 			start = wifi_bands[i].start = wifi_bands[j].start;
1405 			end = wifi_bands[i].end = wifi_bands[j].end;
1406 			wifi_bands[j].start = 0;
1407 			wifi_bands[j].end = 0;
1408 			num_of_wbrf_ranges = j;
1409 		}
1410 
1411 		/* eliminate duplicate entries */
1412 		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
1413 			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
1414 				wifi_bands[j].start = 0;
1415 				wifi_bands[j].end = 0;
1416 			}
1417 		}
1418 	}
1419 
1420 	/* Send the sorted wifi_bands to PMFW */
1421 	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1422 	/* Try to set the wifi_bands again */
1423 	if (unlikely(ret == -EBUSY)) {
1424 		mdelay(5);
1425 		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1426 	}
1427 
1428 	return ret;
1429 }
1430 
1431 /**
1432  * smu_wbrf_event_handler - handle notify events
1433  *
1434  * @nb: notifier block
1435  * @action: event type
1436  * @_arg: event data
1437  *
1438  * Calls relevant amdgpu function in response to wbrf event
1439  * notification from kernel.
1440  */
1441 static int smu_wbrf_event_handler(struct notifier_block *nb,
1442 				  unsigned long action, void *_arg)
1443 {
1444 	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1445 
1446 	switch (action) {
1447 	case WBRF_CHANGED:
1448 		schedule_delayed_work(&smu->wbrf_delayed_work,
1449 				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1450 		break;
1451 	default:
1452 		return NOTIFY_DONE;
1453 	}
1454 
1455 	return NOTIFY_OK;
1456 }
1457 
1458 /**
1459  * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
1460  *
1461  * @work: struct work_struct pointer
1462  *
1463  * The event flood is over; the driver now consumes the latest exclusion ranges.
1464  */
1465 static void smu_wbrf_delayed_work_handler(struct work_struct *work)
1466 {
1467 	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1468 
1469 	smu_wbrf_handle_exclusion_ranges(smu);
1470 }
1471 
1472 /**
1473  * smu_wbrf_support_check - check wbrf support
1474  *
1475  * @smu: smu_context pointer
1476  *
1477  * Checks via the ACPI interface whether wbrf is supported.
1478  */
1479 static void smu_wbrf_support_check(struct smu_context *smu)
1480 {
1481 	struct amdgpu_device *adev = smu->adev;
1482 
1483 	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1484 							acpi_amd_wbrf_supported_consumer(adev->dev);
1485 
1486 	if (smu->wbrf_supported)
1487 		dev_info(adev->dev, "RF interference mitigation is supported\n");
1488 }
1489 
1490 /**
1491  * smu_wbrf_init - init driver wbrf support
1492  *
1493  * @smu: smu_context pointer
1494  *
1495  * Verifies the AMD ACPI interfaces and registers with the wbrf
1496  * notifier chain if wbrf feature is supported.
1497  * Returns 0 on success, error on failure.
1498  */
1499 static int smu_wbrf_init(struct smu_context *smu)
1500 {
1501 	int ret;
1502 
1503 	if (!smu->wbrf_supported)
1504 		return 0;
1505 
1506 	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1507 
1508 	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1509 	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1510 	if (ret)
1511 		return ret;
1512 
1513 	/*
1514 	 * Some wifi band exclusion ranges may already be in place
1515 	 * before our driver is loaded. Schedule a work item to make
1516 	 * sure our driver is aware of those exclusion ranges.
1517 	 */
1518 	schedule_delayed_work(&smu->wbrf_delayed_work,
1519 			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
1520 
1521 	return 0;
1522 }
1523 
1524 /**
1525  * smu_wbrf_fini - tear down driver wbrf support
1526  *
1527  * @smu: smu_context pointer
1528  *
1529  * Unregisters with the wbrf notifier chain.
1530  */
1531 static void smu_wbrf_fini(struct smu_context *smu)
1532 {
1533 	if (!smu->wbrf_supported)
1534 		return;
1535 
1536 	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1537 
1538 	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1539 }
1540 
1541 static int smu_smc_hw_setup(struct smu_context *smu)
1542 {
1543 	struct smu_feature *feature = &smu->smu_feature;
1544 	struct amdgpu_device *adev = smu->adev;
1545 	uint8_t pcie_gen = 0, pcie_width = 0;
1546 	uint64_t features_supported;
1547 	int ret = 0;
1548 
1549 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1550 	case IP_VERSION(11, 0, 7):
1551 	case IP_VERSION(11, 0, 11):
1552 	case IP_VERSION(11, 5, 0):
1553 	case IP_VERSION(11, 0, 12):
1554 		if (adev->in_suspend && smu_is_dpm_running(smu)) {
1555 			dev_info(adev->dev, "dpm has been enabled\n");
1556 			ret = smu_system_features_control(smu, true);
1557 			if (ret)
1558 				dev_err(adev->dev, "Failed system features control!\n");
1559 			return ret;
1560 		}
1561 		break;
1562 	default:
1563 		break;
1564 	}
1565 
1566 	ret = smu_init_display_count(smu, 0);
1567 	if (ret) {
1568 		dev_info(adev->dev, "Failed to pre-set display count to 0!\n");
1569 		return ret;
1570 	}
1571 
1572 	ret = smu_set_driver_table_location(smu);
1573 	if (ret) {
1574 		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
1575 		return ret;
1576 	}
1577 
1578 	/*
1579 	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
1580 	 */
1581 	ret = smu_set_tool_table_location(smu);
1582 	if (ret) {
1583 		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
1584 		return ret;
1585 	}
1586 
1587 	/*
1588 	 * The SetSystemVirtualDramAddr and DramLogSetDramAddr messages notify
1589 	 * the SMC of the pool location.
1590 	 */
1591 	ret = smu_notify_memory_pool_location(smu);
1592 	if (ret) {
1593 		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
1594 		return ret;
1595 	}
1596 
1597 	/*
1598 	 * It is assumed the pptable used before runpm is the same as
1599 	 * the one used afterwards. Thus, we can reuse the stored
1600 	 * copy and do not need to set up the pptable again.
1601 	 */
1602 	if (!adev->in_runpm) {
1603 		ret = smu_setup_pptable(smu);
1604 		if (ret) {
1605 			dev_err(adev->dev, "Failed to setup pptable!\n");
1606 			return ret;
1607 		}
1608 	}
1609 
1610 	/* smu_dump_pptable(smu); */
1611 
1612 	/*
1613 	 * With SCPM enabled, PSP is responsible for transferring the PPTable
1614 	 * (to SMU). Driver involvement is neither needed nor permitted.
1615 	 */
1616 	if (!adev->scpm_enabled) {
1617 		/*
1618 		 * Copy pptable bo in the vram to smc with SMU MSGs such as
1619 		 * SetDriverDramAddr and TransferTableDram2Smu.
1620 		 */
1621 		ret = smu_write_pptable(smu);
1622 		if (ret) {
1623 			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
1624 			return ret;
1625 		}
1626 	}
1627 
1628 	/* issue Run*Btc msg */
1629 	ret = smu_run_btc(smu);
1630 	if (ret)
1631 		return ret;
1632 
1633 	/* Enable UclkShadow on wbrf supported */
1634 	/* Enable UclkShadow when wbrf is supported */
1635 		ret = smu_enable_uclk_shadow(smu, true);
1636 		if (ret) {
1637 			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
1638 			return ret;
1639 		}
1640 	}
1641 
1642 	/*
1643 	 * With SCPM enabled, these actions (and the relevant messages) are
1644 	 * neither needed nor permitted.
1645 	 */
1646 	if (!adev->scpm_enabled) {
1647 		ret = smu_feature_set_allowed_mask(smu);
1648 		if (ret) {
1649 			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
1650 			return ret;
1651 		}
1652 	}
1653 
1654 	ret = smu_system_features_control(smu, true);
1655 	if (ret) {
1656 		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
1657 		return ret;
1658 	}
1659 
1660 	smu_init_xgmi_plpd_mode(smu);
1661 
1662 	ret = smu_feature_get_enabled_mask(smu, &features_supported);
1663 	if (ret) {
1664 		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
1665 		return ret;
1666 	}
1667 	bitmap_copy(feature->supported,
1668 		    (unsigned long *)&features_supported,
1669 		    feature->feature_num);
1670 
1671 	if (!smu_is_dpm_running(smu))
1672 		dev_info(adev->dev, "dpm has been disabled\n");
1673 
1674 	/*
1675 	 * Set initial values (from vbios) in the dpm tables context, such as
1676 	 * gfxclk, memclk, dcefclk, etc., and enable the DPM feature for each
1677 	 * type of clock.
1678 	 */
1679 	ret = smu_set_default_dpm_table(smu);
1680 	if (ret) {
1681 		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
1682 		return ret;
1683 	}
1684 
1685 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
1686 		pcie_gen = 3;
1687 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
1688 		pcie_gen = 2;
1689 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
1690 		pcie_gen = 1;
1691 	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
1692 		pcie_gen = 0;
1693 
1694 	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
1695 	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
1696 	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
1697 	 */
1698 	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
1699 		pcie_width = 6;
1700 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
1701 		pcie_width = 5;
1702 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
1703 		pcie_width = 4;
1704 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
1705 		pcie_width = 3;
1706 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
1707 		pcie_width = 2;
1708 	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
1709 		pcie_width = 1;
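	/*
	 * Example: a Gen4-capable x16 link maps to pcie_gen = 3 and
	 * pcie_width = 6 under the encoding described above.
	 */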
1710 	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1711 	if (ret) {
1712 		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
1713 		return ret;
1714 	}
1715 
1716 	ret = smu_get_thermal_temperature_range(smu);
1717 	if (ret) {
1718 		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
1719 		return ret;
1720 	}
1721 
1722 	ret = smu_enable_thermal_alert(smu);
1723 	if (ret) {
1724 		dev_err(adev->dev, "Failed to enable thermal alert!\n");
1725 		return ret;
1726 	}
1727 
1728 	ret = smu_notify_display_change(smu);
1729 	if (ret) {
1730 		dev_err(adev->dev, "Failed to notify display change!\n");
1731 		return ret;
1732 	}
1733 
1734 	/*
1735 	 * Set min deep sleep dcefclk with the bootup value from vbios via
1736 	 * SetMinDeepSleepDcefclk MSG.
1737 	 */
1738 	ret = smu_set_min_dcef_deep_sleep(smu,
1739 					  smu->smu_table.boot_values.dcefclk / 100);
1740 	if (ret) {
1741 		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
1742 		return ret;
1743 	}
1744 
1745 	/* Init wbrf support. Properly setup the notifier */
1746 	ret = smu_wbrf_init(smu);
1747 	if (ret)
1748 		dev_err(adev->dev, "Error during wbrf init call\n");
1749 
1750 	return ret;
1751 }
1752 
1753 static int smu_start_smc_engine(struct smu_context *smu)
1754 {
1755 	struct amdgpu_device *adev = smu->adev;
1756 	int ret = 0;
1757 
1758 	smu->smc_fw_state = SMU_FW_INIT;
1759 
1760 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1761 		if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1762 			if (smu->ppt_funcs->load_microcode) {
1763 				ret = smu->ppt_funcs->load_microcode(smu);
1764 				if (ret)
1765 					return ret;
1766 			}
1767 		}
1768 	}
1769 
1770 	if (smu->ppt_funcs->check_fw_status) {
1771 		ret = smu->ppt_funcs->check_fw_status(smu);
1772 		if (ret) {
1773 			dev_err(adev->dev, "SMC is not ready\n");
1774 			return ret;
1775 		}
1776 	}
1777 
1778 	/*
1779 	 * Send msg GetDriverIfVersion to check if the return value is equal
1780 	 * to the DRIVER_IF_VERSION in the smc header.
1781 	 */
1782 	ret = smu_check_fw_version(smu);
1783 	if (ret)
1784 		return ret;
1785 
1786 	return ret;
1787 }
1788 
1789 static int smu_hw_init(void *handle)
1790 {
1791 	int ret;
1792 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1793 	struct smu_context *smu = adev->powerplay.pp_handle;
1794 
1795 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
1796 		smu->pm_enabled = false;
1797 		return 0;
1798 	}
1799 
1800 	ret = smu_start_smc_engine(smu);
1801 	if (ret) {
1802 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
1803 		return ret;
1804 	}
1805 
1806 	/*
1807 	 * Check whether wbrf is supported. This needs to be done
1808 	 * before SMU setup starts since part of SMU configuration
1809 	 * relies on this.
1810 	 */
1811 	smu_wbrf_support_check(smu);
1812 
1813 	if (smu->is_apu) {
1814 		ret = smu_set_gfx_imu_enable(smu);
1815 		if (ret)
1816 			return ret;
1817 		smu_dpm_set_vcn_enable(smu, true);
1818 		smu_dpm_set_jpeg_enable(smu, true);
1819 		smu_dpm_set_vpe_enable(smu, true);
1820 		smu_dpm_set_umsch_mm_enable(smu, true);
1821 		smu_set_mall_enable(smu);
1822 		smu_set_gfx_cgpg(smu, true);
1823 	}
1824 
1825 	if (!smu->pm_enabled)
1826 		return 0;
1827 
1828 	ret = smu_get_driver_allowed_feature_mask(smu);
1829 	if (ret)
1830 		return ret;
1831 
1832 	ret = smu_smc_hw_setup(smu);
1833 	if (ret) {
1834 		dev_err(adev->dev, "Failed to setup smc hw!\n");
1835 		return ret;
1836 	}
1837 
1838 	/*
1839 	 * Move maximum sustainable clock retrieving here considering
1840 	 * 1. It is not needed on resume(from S3).
1841 	 * 2. DAL settings come between .hw_init and .late_init of SMU.
1842 	 *    And DAL needs to know the maximum sustainable clocks. Thus
1843 	 *    it cannot be put in .late_init().
1844 	 */
1845 	ret = smu_init_max_sustainable_clocks(smu);
1846 	if (ret) {
1847 		dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1848 		return ret;
1849 	}
1850 
1851 	adev->pm.dpm_enabled = true;
1852 
1853 	dev_info(adev->dev, "SMU is initialized successfully!\n");
1854 
1855 	return 0;
1856 }
1857 
1858 static int smu_disable_dpms(struct smu_context *smu)
1859 {
1860 	struct amdgpu_device *adev = smu->adev;
1861 	int ret = 0;
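	/*
	 * BACO applies only to dGPUs: either a GPU reset that uses the
	 * BACO method, or runtime PM / S4 on an ASIC that supports BACO.
	 */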
1862 	bool use_baco = !smu->is_apu &&
1863 		((amdgpu_in_reset(adev) &&
1864 		  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1865 		 ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1866 
1867 	/*
	 * For the SMU 13.x and 14.x IP versions listed below, PMFW handles
	 * the DPM features (disablement or others) properly on
	 * suspend/reset/unload. Driver involvement may cause unexpected issues.
1870 	 */
1871 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1872 	case IP_VERSION(13, 0, 0):
1873 	case IP_VERSION(13, 0, 7):
1874 	case IP_VERSION(13, 0, 10):
1875 	case IP_VERSION(14, 0, 2):
1876 	case IP_VERSION(14, 0, 3):
1877 		return 0;
1878 	default:
1879 		break;
1880 	}
1881 
1882 	/*
1883 	 * For custom pptable uploading, skip the DPM features
1884 	 * disable process on Navi1x ASICs.
	 *   - The gfx related features are under the control of
	 *     RLC on those ASICs. RLC reinitialization would be
	 *     needed to reenable them, which would cost much more
	 *     effort.
1889 	 *
1890 	 *   - SMU firmware can handle the DPM reenablement
1891 	 *     properly.
1892 	 */
1893 	if (smu->uploading_custom_pp_table) {
1894 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1895 		case IP_VERSION(11, 0, 0):
1896 		case IP_VERSION(11, 0, 5):
1897 		case IP_VERSION(11, 0, 9):
1898 		case IP_VERSION(11, 0, 7):
1899 		case IP_VERSION(11, 0, 11):
1900 		case IP_VERSION(11, 5, 0):
1901 		case IP_VERSION(11, 0, 12):
1902 		case IP_VERSION(11, 0, 13):
1903 			return 0;
1904 		default:
1905 			break;
1906 		}
1907 	}
1908 
1909 	/*
	 * For Sienna_Cichlid and the other ASICs listed below, PMFW handles
	 * the features disablement properly on BACO entry. Driver involvement
	 * is unnecessary.
1912 	 */
1913 	if (use_baco) {
1914 		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1915 		case IP_VERSION(11, 0, 7):
1916 		case IP_VERSION(11, 0, 0):
1917 		case IP_VERSION(11, 0, 5):
1918 		case IP_VERSION(11, 0, 9):
1919 		case IP_VERSION(13, 0, 7):
1920 			return 0;
1921 		default:
1922 			break;
1923 		}
1924 	}
1925 
1926 	/*
1927 	 * For GFX11 and subsequent APUs, PMFW will handle the features disablement properly
1928 	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1929 	 */
1930 	if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
1931 	    smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
1932 		return 0;
1933 
1934 	/*
1935 	 * For gpu reset, runpm and hibernation through BACO,
1936 	 * BACO feature has to be kept enabled.
1937 	 */
1938 	if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1939 		ret = smu_disable_all_features_with_exception(smu,
1940 							      SMU_FEATURE_BACO_BIT);
1941 		if (ret)
1942 			dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1943 	} else {
1944 		/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
1945 		if (!adev->scpm_enabled) {
1946 			ret = smu_system_features_control(smu, false);
1947 			if (ret)
1948 				dev_err(adev->dev, "Failed to disable smu features.\n");
1949 		}
1950 	}
1951 
	/*
	 * Notify SMU that RLC is going to be off and stop the RLC/SMU
	 * interaction; otherwise SMU will hang while interacting with RLC
	 * if RLC is halted. This is a workaround for the Vangogh ASIC that
	 * fixes the SMU hang issue.
	 */
1956 	ret = smu_notify_rlc_state(smu, false);
1957 	if (ret) {
		dev_err(adev->dev, "Failed to notify RLC state!\n");
1959 		return ret;
1960 	}
1961 
1962 	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
1963 	    !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
1964 	    !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
1965 		adev->gfx.rlc.funcs->stop(adev);
1966 
1967 	return ret;
1968 }
1969 
1970 static int smu_smc_hw_cleanup(struct smu_context *smu)
1971 {
1972 	struct amdgpu_device *adev = smu->adev;
1973 	int ret = 0;
1974 
1975 	smu_wbrf_fini(smu);
1976 
1977 	cancel_work_sync(&smu->throttling_logging_work);
1978 	cancel_work_sync(&smu->interrupt_work);
1979 
1980 	ret = smu_disable_thermal_alert(smu);
1981 	if (ret) {
		dev_err(adev->dev, "Failed to disable thermal alert!\n");
1983 		return ret;
1984 	}
1985 
1986 	cancel_delayed_work_sync(&smu->swctf_delayed_work);
1987 
1988 	ret = smu_disable_dpms(smu);
1989 	if (ret) {
		dev_err(adev->dev, "Failed to disable dpm features!\n");
1991 		return ret;
1992 	}
1993 
1994 	return 0;
1995 }
1996 
1997 static int smu_reset_mp1_state(struct smu_context *smu)
1998 {
1999 	struct amdgpu_device *adev = smu->adev;
2000 	int ret = 0;
2001 
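	/*
	 * Only on a plain driver unload (not runtime PM, suspend or GPU
	 * reset), and only on SMU 13.0.10 parts without display hardware,
	 * move MP1 to the UNLOAD state.
	 */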
	if (!adev->in_runpm && !adev->in_suspend &&
	    !amdgpu_in_reset(adev) &&
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
	    !amdgpu_device_has_display_hardware(adev))
2006 		ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2007 
2008 	return ret;
2009 }
2010 
2011 static int smu_hw_fini(void *handle)
2012 {
2013 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2014 	struct smu_context *smu = adev->powerplay.pp_handle;
2015 	int ret;
2016 
2017 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2018 		return 0;
2019 
2020 	smu_dpm_set_vcn_enable(smu, false);
2021 	smu_dpm_set_jpeg_enable(smu, false);
2022 	smu_dpm_set_vpe_enable(smu, false);
2023 	smu_dpm_set_umsch_mm_enable(smu, false);
2024 
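	/* Record VCN/JPEG as power gated to match the disabled state */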
2025 	adev->vcn.cur_state = AMD_PG_STATE_GATE;
2026 	adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2027 
2028 	if (!smu->pm_enabled)
2029 		return 0;
2030 
2031 	adev->pm.dpm_enabled = false;
2032 
2033 	ret = smu_smc_hw_cleanup(smu);
2034 	if (ret)
2035 		return ret;
2036 
2037 	ret = smu_reset_mp1_state(smu);
2038 	if (ret)
2039 		return ret;
2040 
2041 	return 0;
2042 }
2043 
2044 static void smu_late_fini(void *handle)
2045 {
2046 	struct amdgpu_device *adev = handle;
2047 	struct smu_context *smu = adev->powerplay.pp_handle;
2048 
2049 	kfree(smu);
2050 }
2051 
2052 static int smu_reset(struct smu_context *smu)
2053 {
2054 	struct amdgpu_device *adev = smu->adev;
2055 	int ret;
2056 
2057 	ret = smu_hw_fini(adev);
2058 	if (ret)
2059 		return ret;
2060 
2061 	ret = smu_hw_init(adev);
2062 	if (ret)
2063 		return ret;
2064 
2065 	ret = smu_late_init(adev);
2066 	if (ret)
2067 		return ret;
2068 
2069 	return 0;
2070 }
2071 
2072 static int smu_suspend(void *handle)
2073 {
2074 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2075 	struct smu_context *smu = adev->powerplay.pp_handle;
2076 	int ret;
2077 	uint64_t count;
2078 
2079 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2080 		return 0;
2081 
2082 	if (!smu->pm_enabled)
2083 		return 0;
2084 
2085 	adev->pm.dpm_enabled = false;
2086 
2087 	ret = smu_smc_hw_cleanup(smu);
2088 	if (ret)
2089 		return ret;
2090 
2091 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2092 
2093 	smu_set_gfx_cgpg(smu, false);
2094 
	/*
	 * PMFW resets the gfxoff entry count when the device is suspended,
	 * so we save the last value to be used when we resume to keep it
	 * consistent.
	 */
2099 	ret = smu_get_entrycount_gfxoff(smu, &count);
2100 	if (!ret)
2101 		adev->gfx.gfx_off_entrycount = count;
2102 
2103 	return 0;
2104 }
2105 
2106 static int smu_resume(void *handle)
2107 {
2108 	int ret;
2109 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2110 	struct smu_context *smu = adev->powerplay.pp_handle;
2111 
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2113 		return 0;
2114 
2115 	if (!smu->pm_enabled)
2116 		return 0;
2117 
2118 	dev_info(adev->dev, "SMU is resuming...\n");
2119 
2120 	ret = smu_start_smc_engine(smu);
2121 	if (ret) {
2122 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
2123 		return ret;
2124 	}
2125 
2126 	ret = smu_smc_hw_setup(smu);
2127 	if (ret) {
2128 		dev_err(adev->dev, "Failed to setup smc hw!\n");
2129 		return ret;
2130 	}
2131 
2132 	ret = smu_set_gfx_imu_enable(smu);
2133 	if (ret)
2134 		return ret;
2135 
2136 	smu_set_gfx_cgpg(smu, true);
2137 
2138 	smu->disable_uclk_switch = 0;
2139 
2140 	adev->pm.dpm_enabled = true;
2141 
2142 	dev_info(adev->dev, "SMU is resumed successfully!\n");
2143 
2144 	return 0;
2145 }
2146 
2147 static int smu_display_configuration_change(void *handle,
2148 					    const struct amd_pp_display_configuration *display_config)
2149 {
2150 	struct smu_context *smu = handle;
2151 
2152 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2153 		return -EOPNOTSUPP;
2154 
2155 	if (!display_config)
2156 		return -EINVAL;
2157 
2158 	smu_set_min_dcef_deep_sleep(smu,
2159 				    display_config->min_dcef_deep_sleep_set_clk / 100);
2160 
2161 	return 0;
2162 }
2163 
2164 static int smu_set_clockgating_state(void *handle,
2165 				     enum amd_clockgating_state state)
2166 {
2167 	return 0;
2168 }
2169 
2170 static int smu_set_powergating_state(void *handle,
2171 				     enum amd_powergating_state state)
2172 {
2173 	return 0;
2174 }
2175 
2176 static int smu_enable_umd_pstate(void *handle,
2177 		      enum amd_dpm_forced_level *level)
2178 {
2179 	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2180 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2181 					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2182 					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2183 
	struct smu_context *smu = (struct smu_context *)handle;
2185 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2186 
2187 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2188 		return -EINVAL;
2189 
2190 	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2191 		/* enter umd pstate, save current level, disable gfx cg*/
2192 		if (*level & profile_mode_mask) {
2193 			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2194 			smu_gpo_control(smu, false);
2195 			smu_gfx_ulv_control(smu, false);
2196 			smu_deep_sleep_control(smu, false);
2197 			amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2198 		}
2199 	} else {
2200 		/* exit umd pstate, restore level, enable gfx cg*/
2201 		if (!(*level & profile_mode_mask)) {
2202 			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2203 				*level = smu_dpm_ctx->saved_dpm_level;
2204 			amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2205 			smu_deep_sleep_control(smu, true);
2206 			smu_gfx_ulv_control(smu, true);
2207 			smu_gpo_control(smu, true);
2208 		}
2209 	}
2210 
2211 	return 0;
2212 }
2213 
2214 static int smu_bump_power_profile_mode(struct smu_context *smu,
2215 					   long *param,
2216 					   uint32_t param_size)
2217 {
2218 	int ret = 0;
2219 
2220 	if (smu->ppt_funcs->set_power_profile_mode)
2221 		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2222 
2223 	return ret;
2224 }
2225 
2226 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2227 				   enum amd_dpm_forced_level level,
2228 				   bool skip_display_settings)
2229 {
2230 	int ret = 0;
2231 	int index = 0;
2232 	long workload[1];
2233 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2234 
2235 	if (!skip_display_settings) {
2236 		ret = smu_display_config_changed(smu);
2237 		if (ret) {
			dev_err(smu->adev->dev, "Failed to change display config!\n");
2239 			return ret;
2240 		}
2241 	}
2242 
2243 	ret = smu_apply_clocks_adjust_rules(smu);
2244 	if (ret) {
		dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!\n");
2246 		return ret;
2247 	}
2248 
2249 	if (!skip_display_settings) {
2250 		ret = smu_notify_smc_display_config(smu);
2251 		if (ret) {
			dev_err(smu->adev->dev, "Failed to notify smc display config!\n");
2253 			return ret;
2254 		}
2255 	}
2256 
2257 	if (smu_dpm_ctx->dpm_level != level) {
2258 		ret = smu_asic_set_performance_level(smu, level);
2259 		if (ret) {
			dev_err(smu->adev->dev, "Failed to set performance level!\n");
2261 			return ret;
2262 		}
2263 
2264 		/* update the saved copy */
2265 		smu_dpm_ctx->dpm_level = level;
2266 	}
2267 
2268 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2269 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
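		/*
		 * fls() returns the 1-based index of the highest set bit, so
		 * this picks the highest-priority workload still active in
		 * the mask, falling back to entry 0 when the mask is empty
		 * or out of range.
		 */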
2270 		index = fls(smu->workload_mask);
2271 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2272 		workload[0] = smu->workload_setting[index];
2273 
2274 		if (smu->power_profile_mode != workload[0])
2275 			smu_bump_power_profile_mode(smu, workload, 0);
2276 	}
2277 
2278 	return ret;
2279 }
2280 
2281 static int smu_handle_task(struct smu_context *smu,
2282 			   enum amd_dpm_forced_level level,
2283 			   enum amd_pp_task task_id)
2284 {
2285 	int ret = 0;
2286 
2287 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2288 		return -EOPNOTSUPP;
2289 
2290 	switch (task_id) {
2291 	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2292 		ret = smu_pre_display_config_changed(smu);
2293 		if (ret)
2294 			return ret;
2295 		ret = smu_adjust_power_state_dynamic(smu, level, false);
2296 		break;
2297 	case AMD_PP_TASK_COMPLETE_INIT:
2298 	case AMD_PP_TASK_READJUST_POWER_STATE:
2299 		ret = smu_adjust_power_state_dynamic(smu, level, true);
2300 		break;
2301 	default:
2302 		break;
2303 	}
2304 
2305 	return ret;
2306 }
2307 
2308 static int smu_handle_dpm_task(void *handle,
2309 			       enum amd_pp_task task_id,
2310 			       enum amd_pm_state_type *user_state)
2311 {
2312 	struct smu_context *smu = handle;
2313 	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2314 
	return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2317 }
2318 
2319 static int smu_switch_power_profile(void *handle,
2320 				    enum PP_SMC_POWER_PROFILE type,
2321 				    bool en)
2322 {
2323 	struct smu_context *smu = handle;
2324 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2325 	long workload[1];
2326 	uint32_t index;
2327 
2328 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2329 		return -EOPNOTSUPP;
2330 
	if (type >= PP_SMC_POWER_PROFILE_CUSTOM)
2332 		return -EINVAL;
2333 
2334 	if (!en) {
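		/*
		 * Drop this profile's priority bit and re-derive the active
		 * workload from the highest bit left in the mask.
		 */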
2335 		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
2336 		index = fls(smu->workload_mask);
2337 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2338 		workload[0] = smu->workload_setting[index];
2339 	} else {
2340 		smu->workload_mask |= (1 << smu->workload_prority[type]);
2341 		index = fls(smu->workload_mask);
2342 		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
2343 		workload[0] = smu->workload_setting[index];
2344 	}
2345 
2346 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2347 		smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2348 		smu_bump_power_profile_mode(smu, workload, 0);
2349 
2350 	return 0;
2351 }
2352 
2353 static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2354 {
2355 	struct smu_context *smu = handle;
2356 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2357 
2358 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2359 		return -EOPNOTSUPP;
2360 
2361 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2362 		return -EINVAL;
2363 
2364 	return smu_dpm_ctx->dpm_level;
2365 }
2366 
2367 static int smu_force_performance_level(void *handle,
2368 				       enum amd_dpm_forced_level level)
2369 {
2370 	struct smu_context *smu = handle;
2371 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2372 	int ret = 0;
2373 
2374 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2375 		return -EOPNOTSUPP;
2376 
2377 	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2378 		return -EINVAL;
2379 
2380 	ret = smu_enable_umd_pstate(smu, &level);
2381 	if (ret)
2382 		return ret;
2383 
2384 	ret = smu_handle_task(smu, level,
2385 			      AMD_PP_TASK_READJUST_POWER_STATE);
2386 
2387 	/* reset user dpm clock state */
2388 	if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2389 		memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2390 		smu->user_dpm_profile.clk_dependency = 0;
2391 	}
2392 
2393 	return ret;
2394 }
2395 
2396 static int smu_set_display_count(void *handle, uint32_t count)
2397 {
2398 	struct smu_context *smu = handle;
2399 
2400 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2401 		return -EOPNOTSUPP;
2402 
2403 	return smu_init_display_count(smu, count);
2404 }
2405 
2406 static int smu_force_smuclk_levels(struct smu_context *smu,
2407 			 enum smu_clk_type clk_type,
2408 			 uint32_t mask)
2409 {
2410 	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2411 	int ret = 0;
2412 
2413 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2414 		return -EOPNOTSUPP;
2415 
2416 	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2417 		dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2418 		return -EINVAL;
2419 	}
2420 
2421 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2422 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2423 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2424 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
2425 			smu_set_user_clk_dependencies(smu, clk_type);
2426 		}
2427 	}
2428 
2429 	return ret;
2430 }
2431 
2432 static int smu_force_ppclk_levels(void *handle,
2433 				  enum pp_clock_type type,
2434 				  uint32_t mask)
2435 {
2436 	struct smu_context *smu = handle;
2437 	enum smu_clk_type clk_type;
2438 
2439 	switch (type) {
2440 	case PP_SCLK:
2441 		clk_type = SMU_SCLK; break;
2442 	case PP_MCLK:
2443 		clk_type = SMU_MCLK; break;
2444 	case PP_PCIE:
2445 		clk_type = SMU_PCIE; break;
2446 	case PP_SOCCLK:
2447 		clk_type = SMU_SOCCLK; break;
2448 	case PP_FCLK:
2449 		clk_type = SMU_FCLK; break;
2450 	case PP_DCEFCLK:
2451 		clk_type = SMU_DCEFCLK; break;
2452 	case PP_VCLK:
2453 		clk_type = SMU_VCLK; break;
2454 	case PP_VCLK1:
2455 		clk_type = SMU_VCLK1; break;
2456 	case PP_DCLK:
2457 		clk_type = SMU_DCLK; break;
2458 	case PP_DCLK1:
2459 		clk_type = SMU_DCLK1; break;
2460 	case OD_SCLK:
2461 		clk_type = SMU_OD_SCLK; break;
2462 	case OD_MCLK:
2463 		clk_type = SMU_OD_MCLK; break;
2464 	case OD_VDDC_CURVE:
2465 		clk_type = SMU_OD_VDDC_CURVE; break;
2466 	case OD_RANGE:
2467 		clk_type = SMU_OD_RANGE; break;
2468 	default:
2469 		return -EINVAL;
2470 	}
2471 
2472 	return smu_force_smuclk_levels(smu, clk_type, mask);
2473 }
2474 
/*
 * On system suspend or reset, the dpm_enabled flag is cleared so that
 * the SMU services which are not supported get gated.
 * However, the mp1 state setting should still be allowed even when
 * dpm_enabled is cleared.
 */
2482 static int smu_set_mp1_state(void *handle,
2483 			     enum pp_mp1_state mp1_state)
2484 {
2485 	struct smu_context *smu = handle;
2486 	int ret = 0;
2487 
2488 	if (!smu->pm_enabled)
2489 		return -EOPNOTSUPP;
2490 
2491 	if (smu->ppt_funcs &&
2492 	    smu->ppt_funcs->set_mp1_state)
2493 		ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2494 
2495 	return ret;
2496 }
2497 
2498 static int smu_set_df_cstate(void *handle,
2499 			     enum pp_df_cstate state)
2500 {
2501 	struct smu_context *smu = handle;
2502 	int ret = 0;
2503 
2504 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2505 		return -EOPNOTSUPP;
2506 
2507 	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2508 		return 0;
2509 
2510 	ret = smu->ppt_funcs->set_df_cstate(smu, state);
2511 	if (ret)
2512 		dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2513 
2514 	return ret;
2515 }
2516 
2517 int smu_write_watermarks_table(struct smu_context *smu)
2518 {
2519 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2520 		return -EOPNOTSUPP;
2521 
2522 	return smu_set_watermarks_table(smu, NULL);
2523 }
2524 
2525 static int smu_set_watermarks_for_clock_ranges(void *handle,
2526 					       struct pp_smu_wm_range_sets *clock_ranges)
2527 {
2528 	struct smu_context *smu = handle;
2529 
2530 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2531 		return -EOPNOTSUPP;
2532 
2533 	if (smu->disable_watermark)
2534 		return 0;
2535 
2536 	return smu_set_watermarks_table(smu, clock_ranges);
2537 }
2538 
2539 int smu_set_ac_dc(struct smu_context *smu)
2540 {
2541 	int ret = 0;
2542 
2543 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2544 		return -EOPNOTSUPP;
2545 
2546 	/* controlled by firmware */
2547 	if (smu->dc_controlled_by_gpio)
2548 		return 0;
2549 
2550 	ret = smu_set_power_source(smu,
2551 				   smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2552 				   SMU_POWER_SOURCE_DC);
2553 	if (ret)
2554 		dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2555 		       smu->adev->pm.ac_power ? "AC" : "DC");
2556 
2557 	return ret;
2558 }
2559 
2560 const struct amd_ip_funcs smu_ip_funcs = {
2561 	.name = "smu",
2562 	.early_init = smu_early_init,
2563 	.late_init = smu_late_init,
2564 	.sw_init = smu_sw_init,
2565 	.sw_fini = smu_sw_fini,
2566 	.hw_init = smu_hw_init,
2567 	.hw_fini = smu_hw_fini,
2568 	.late_fini = smu_late_fini,
2569 	.suspend = smu_suspend,
2570 	.resume = smu_resume,
2571 	.is_idle = NULL,
2572 	.check_soft_reset = NULL,
2573 	.wait_for_idle = NULL,
2574 	.soft_reset = NULL,
2575 	.set_clockgating_state = smu_set_clockgating_state,
2576 	.set_powergating_state = smu_set_powergating_state,
2577 };
2578 
2579 const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2580 	.type = AMD_IP_BLOCK_TYPE_SMC,
2581 	.major = 11,
2582 	.minor = 0,
2583 	.rev = 0,
2584 	.funcs = &smu_ip_funcs,
2585 };
2586 
2587 const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2588 	.type = AMD_IP_BLOCK_TYPE_SMC,
2589 	.major = 12,
2590 	.minor = 0,
2591 	.rev = 0,
2592 	.funcs = &smu_ip_funcs,
2593 };
2594 
2595 const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2596 	.type = AMD_IP_BLOCK_TYPE_SMC,
2597 	.major = 13,
2598 	.minor = 0,
2599 	.rev = 0,
2600 	.funcs = &smu_ip_funcs,
2601 };
2602 
2603 const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2604 	.type = AMD_IP_BLOCK_TYPE_SMC,
2605 	.major = 14,
2606 	.minor = 0,
2607 	.rev = 0,
2608 	.funcs = &smu_ip_funcs,
2609 };
2610 
2611 static int smu_load_microcode(void *handle)
2612 {
2613 	struct smu_context *smu = handle;
2614 	struct amdgpu_device *adev = smu->adev;
2615 	int ret = 0;
2616 
2617 	if (!smu->pm_enabled)
2618 		return -EOPNOTSUPP;
2619 
2620 	/* This should be used for non PSP loading */
2621 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2622 		return 0;
2623 
2624 	if (smu->ppt_funcs->load_microcode) {
2625 		ret = smu->ppt_funcs->load_microcode(smu);
2626 		if (ret) {
2627 			dev_err(adev->dev, "Load microcode failed\n");
2628 			return ret;
2629 		}
2630 	}
2631 
2632 	if (smu->ppt_funcs->check_fw_status) {
2633 		ret = smu->ppt_funcs->check_fw_status(smu);
2634 		if (ret) {
2635 			dev_err(adev->dev, "SMC is not ready\n");
2636 			return ret;
2637 		}
2638 	}
2639 
2640 	return ret;
2641 }
2642 
2643 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2644 {
2645 	int ret = 0;
2646 
2647 	if (smu->ppt_funcs->set_gfx_cgpg)
2648 		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2649 
2650 	return ret;
2651 }
2652 
2653 static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2654 {
2655 	struct smu_context *smu = handle;
2656 	int ret = 0;
2657 
2658 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2659 		return -EOPNOTSUPP;
2660 
2661 	if (!smu->ppt_funcs->set_fan_speed_rpm)
2662 		return -EOPNOTSUPP;
2663 
2664 	if (speed == U32_MAX)
2665 		return -EINVAL;
2666 
2667 	ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2668 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2669 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2670 		smu->user_dpm_profile.fan_speed_rpm = speed;
2671 
		/* Override any custom PWM setting as the two cannot co-exist */
2673 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2674 		smu->user_dpm_profile.fan_speed_pwm = 0;
2675 	}
2676 
2677 	return ret;
2678 }
2679 
2680 /**
2681  * smu_get_power_limit - Request one of the SMU Power Limits
2682  *
2683  * @handle: pointer to smu context
2684  * @limit: requested limit is written back to this variable
 * @pp_limit_level: &pp_power_limit_level selecting which power limit to return
 * @pp_power_type: &pp_power_type selecting the type of power limit
 *
 * Return: 0 on success, <0 on error
 */
2690 int smu_get_power_limit(void *handle,
2691 			uint32_t *limit,
2692 			enum pp_power_limit_level pp_limit_level,
2693 			enum pp_power_type pp_power_type)
2694 {
2695 	struct smu_context *smu = handle;
2696 	struct amdgpu_device *adev = smu->adev;
2697 	enum smu_ppt_limit_level limit_level;
2698 	uint32_t limit_type;
2699 	int ret = 0;
2700 
2701 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2702 		return -EOPNOTSUPP;
2703 
2704 	switch (pp_power_type) {
2705 	case PP_PWR_TYPE_SUSTAINED:
2706 		limit_type = SMU_DEFAULT_PPT_LIMIT;
2707 		break;
2708 	case PP_PWR_TYPE_FAST:
2709 		limit_type = SMU_FAST_PPT_LIMIT;
2710 		break;
2711 	default:
2712 		return -EOPNOTSUPP;
2713 	}
2714 
2715 	switch (pp_limit_level) {
2716 	case PP_PWR_LIMIT_CURRENT:
2717 		limit_level = SMU_PPT_LIMIT_CURRENT;
2718 		break;
2719 	case PP_PWR_LIMIT_DEFAULT:
2720 		limit_level = SMU_PPT_LIMIT_DEFAULT;
2721 		break;
2722 	case PP_PWR_LIMIT_MAX:
2723 		limit_level = SMU_PPT_LIMIT_MAX;
2724 		break;
2725 	case PP_PWR_LIMIT_MIN:
2726 		limit_level = SMU_PPT_LIMIT_MIN;
2727 		break;
2728 	default:
2729 		return -EOPNOTSUPP;
2730 	}
2731 
2732 	if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2733 		if (smu->ppt_funcs->get_ppt_limit)
2734 			ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2735 	} else {
2736 		switch (limit_level) {
2737 		case SMU_PPT_LIMIT_CURRENT:
2738 			switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2739 			case IP_VERSION(13, 0, 2):
2740 			case IP_VERSION(13, 0, 6):
2741 			case IP_VERSION(13, 0, 14):
2742 			case IP_VERSION(11, 0, 7):
2743 			case IP_VERSION(11, 0, 11):
2744 			case IP_VERSION(11, 0, 12):
2745 			case IP_VERSION(11, 0, 13):
2746 				ret = smu_get_asic_power_limits(smu,
2747 								&smu->current_power_limit,
2748 								NULL, NULL, NULL);
2749 				break;
2750 			default:
2751 				break;
2752 			}
2753 			*limit = smu->current_power_limit;
2754 			break;
2755 		case SMU_PPT_LIMIT_DEFAULT:
2756 			*limit = smu->default_power_limit;
2757 			break;
2758 		case SMU_PPT_LIMIT_MAX:
2759 			*limit = smu->max_power_limit;
2760 			break;
2761 		case SMU_PPT_LIMIT_MIN:
2762 			*limit = smu->min_power_limit;
2763 			break;
2764 		default:
2765 			return -EINVAL;
2766 		}
2767 	}
2768 
2769 	return ret;
2770 }
2771 
2772 static int smu_set_power_limit(void *handle, uint32_t limit)
2773 {
2774 	struct smu_context *smu = handle;
2775 	uint32_t limit_type = limit >> 24;
2776 	int ret = 0;
2777 
2778 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2779 		return -EOPNOTSUPP;
2780 
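	/*
	 * The incoming limit packs the PPT limit type into bits 31-24 and
	 * the power limit value itself into bits 23-0.
	 */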
	limit &= (1 << 24) - 1;
2782 	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2783 		if (smu->ppt_funcs->set_power_limit)
2784 			return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2785 
2786 	if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2787 		dev_err(smu->adev->dev,
2788 			"New power limit (%d) is out of range [%d,%d]\n",
2789 			limit, smu->min_power_limit, smu->max_power_limit);
2790 		return -EINVAL;
2791 	}
2792 
2793 	if (!limit)
2794 		limit = smu->current_power_limit;
2795 
2796 	if (smu->ppt_funcs->set_power_limit) {
2797 		ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2798 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2799 			smu->user_dpm_profile.power_limit = limit;
2800 	}
2801 
2802 	return ret;
2803 }
2804 
2805 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2806 {
2807 	int ret = 0;
2808 
2809 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2810 		return -EOPNOTSUPP;
2811 
2812 	if (smu->ppt_funcs->print_clk_levels)
2813 		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2814 
2815 	return ret;
2816 }
2817 
2818 static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2819 {
2820 	enum smu_clk_type clk_type;
2821 
2822 	switch (type) {
2823 	case PP_SCLK:
2824 		clk_type = SMU_SCLK; break;
2825 	case PP_MCLK:
2826 		clk_type = SMU_MCLK; break;
2827 	case PP_PCIE:
2828 		clk_type = SMU_PCIE; break;
2829 	case PP_SOCCLK:
2830 		clk_type = SMU_SOCCLK; break;
2831 	case PP_FCLK:
2832 		clk_type = SMU_FCLK; break;
2833 	case PP_DCEFCLK:
2834 		clk_type = SMU_DCEFCLK; break;
2835 	case PP_VCLK:
2836 		clk_type = SMU_VCLK; break;
2837 	case PP_VCLK1:
2838 		clk_type = SMU_VCLK1; break;
2839 	case PP_DCLK:
2840 		clk_type = SMU_DCLK; break;
2841 	case PP_DCLK1:
2842 		clk_type = SMU_DCLK1; break;
2843 	case OD_SCLK:
2844 		clk_type = SMU_OD_SCLK; break;
2845 	case OD_MCLK:
2846 		clk_type = SMU_OD_MCLK; break;
2847 	case OD_VDDC_CURVE:
2848 		clk_type = SMU_OD_VDDC_CURVE; break;
2849 	case OD_RANGE:
2850 		clk_type = SMU_OD_RANGE; break;
2851 	case OD_VDDGFX_OFFSET:
2852 		clk_type = SMU_OD_VDDGFX_OFFSET; break;
2853 	case OD_CCLK:
2854 		clk_type = SMU_OD_CCLK; break;
2855 	case OD_FAN_CURVE:
2856 		clk_type = SMU_OD_FAN_CURVE; break;
2857 	case OD_ACOUSTIC_LIMIT:
2858 		clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
2859 	case OD_ACOUSTIC_TARGET:
2860 		clk_type = SMU_OD_ACOUSTIC_TARGET; break;
2861 	case OD_FAN_TARGET_TEMPERATURE:
2862 		clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
2863 	case OD_FAN_MINIMUM_PWM:
2864 		clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
2865 	default:
2866 		clk_type = SMU_CLK_COUNT; break;
2867 	}
2868 
2869 	return clk_type;
2870 }
2871 
2872 static int smu_print_ppclk_levels(void *handle,
2873 				  enum pp_clock_type type,
2874 				  char *buf)
2875 {
2876 	struct smu_context *smu = handle;
2877 	enum smu_clk_type clk_type;
2878 
2879 	clk_type = smu_convert_to_smuclk(type);
2880 	if (clk_type == SMU_CLK_COUNT)
2881 		return -EINVAL;
2882 
2883 	return smu_print_smuclk_levels(smu, clk_type, buf);
2884 }
2885 
2886 static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
2887 {
2888 	struct smu_context *smu = handle;
2889 	enum smu_clk_type clk_type;
2890 
2891 	clk_type = smu_convert_to_smuclk(type);
2892 	if (clk_type == SMU_CLK_COUNT)
2893 		return -EINVAL;
2894 
2895 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2896 		return -EOPNOTSUPP;
2897 
2898 	if (!smu->ppt_funcs->emit_clk_levels)
2899 		return -ENOENT;
2900 
2901 	return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2903 }
2904 
2905 static int smu_od_edit_dpm_table(void *handle,
2906 				 enum PP_OD_DPM_TABLE_COMMAND type,
2907 				 long *input, uint32_t size)
2908 {
2909 	struct smu_context *smu = handle;
2910 	int ret = 0;
2911 
2912 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2913 		return -EOPNOTSUPP;
2914 
	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2918 
2919 	return ret;
2920 }
2921 
2922 static int smu_read_sensor(void *handle,
2923 			   int sensor,
2924 			   void *data,
2925 			   int *size_arg)
2926 {
2927 	struct smu_context *smu = handle;
2928 	struct smu_umd_pstate_table *pstate_table =
2929 				&smu->pstate_table;
2930 	int ret = 0;
2931 	uint32_t *size, size_val;
2932 
2933 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2934 		return -EOPNOTSUPP;
2935 
2936 	if (!data || !size_arg)
2937 		return -EINVAL;
2938 
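	/*
	 * The ppt read_sensor callback takes a uint32_t size while this
	 * interface receives an int, so bridge through a local copy.
	 */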
2939 	size_val = *size_arg;
2940 	size = &size_val;
2941 
2942 	if (smu->ppt_funcs->read_sensor)
2943 		if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2944 			goto unlock;
2945 
2946 	switch (sensor) {
2947 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
2948 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
2949 		*size = 4;
2950 		break;
2951 	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
2952 		*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
2953 		*size = 4;
2954 		break;
2955 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
2956 		*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
2957 		*size = 4;
2958 		break;
2959 	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
2960 		*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
2961 		*size = 4;
2962 		break;
2963 	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
2964 		ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2965 		*size = 8;
2966 		break;
2967 	case AMDGPU_PP_SENSOR_UVD_POWER:
2968 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2969 		*size = 4;
2970 		break;
2971 	case AMDGPU_PP_SENSOR_VCE_POWER:
2972 		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2973 		*size = 4;
2974 		break;
2975 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
2976 		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2977 		*size = 4;
2978 		break;
2979 	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
2980 		*(uint32_t *)data = 0;
2981 		*size = 4;
2982 		break;
2983 	default:
2984 		*size = 0;
2985 		ret = -EOPNOTSUPP;
2986 		break;
2987 	}
2988 
2989 unlock:
	/* copy the uint32_t size value back into the int out-parameter */
2991 	*size_arg = size_val;
2992 
2993 	return ret;
2994 }
2995 
2996 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
2997 {
2998 	int ret = -EOPNOTSUPP;
2999 	struct smu_context *smu = handle;
3000 
3001 	if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3002 		ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3003 
3004 	return ret;
3005 }
3006 
3007 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3008 {
3009 	int ret = -EOPNOTSUPP;
3010 	struct smu_context *smu = handle;
3011 
3012 	if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3013 		ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3014 
3015 	return ret;
3016 }
3017 
3018 static int smu_get_power_profile_mode(void *handle, char *buf)
3019 {
3020 	struct smu_context *smu = handle;
3021 
3022 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3023 	    !smu->ppt_funcs->get_power_profile_mode)
3024 		return -EOPNOTSUPP;
3025 	if (!buf)
3026 		return -EINVAL;
3027 
3028 	return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3029 }
3030 
3031 static int smu_set_power_profile_mode(void *handle,
3032 				      long *param,
3033 				      uint32_t param_size)
3034 {
3035 	struct smu_context *smu = handle;
3036 
3037 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3038 	    !smu->ppt_funcs->set_power_profile_mode)
3039 		return -EOPNOTSUPP;
3040 
3041 	return smu_bump_power_profile_mode(smu, param, param_size);
3042 }
3043 
3044 static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3045 {
3046 	struct smu_context *smu = handle;
3047 
3048 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3049 		return -EOPNOTSUPP;
3050 
3051 	if (!smu->ppt_funcs->get_fan_control_mode)
3052 		return -EOPNOTSUPP;
3053 
3054 	if (!fan_mode)
3055 		return -EINVAL;
3056 
3057 	*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3058 
3059 	return 0;
3060 }
3061 
3062 static int smu_set_fan_control_mode(void *handle, u32 value)
3063 {
3064 	struct smu_context *smu = handle;
3065 	int ret = 0;
3066 
3067 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3068 		return -EOPNOTSUPP;
3069 
3070 	if (!smu->ppt_funcs->set_fan_control_mode)
3071 		return -EOPNOTSUPP;
3072 
3073 	if (value == U32_MAX)
3074 		return -EINVAL;
3075 
3076 	ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3077 	if (ret)
3078 		goto out;
3079 
3080 	if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3081 		smu->user_dpm_profile.fan_mode = value;
3082 
3083 		/* reset user dpm fan speed */
3084 		if (value != AMD_FAN_CTRL_MANUAL) {
3085 			smu->user_dpm_profile.fan_speed_pwm = 0;
3086 			smu->user_dpm_profile.fan_speed_rpm = 0;
3087 			smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3088 		}
3089 	}
3090 
3091 out:
3092 	return ret;
3093 }
3094 
3095 static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3096 {
3097 	struct smu_context *smu = handle;
3098 	int ret = 0;
3099 
3100 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3101 		return -EOPNOTSUPP;
3102 
3103 	if (!smu->ppt_funcs->get_fan_speed_pwm)
3104 		return -EOPNOTSUPP;
3105 
3106 	if (!speed)
3107 		return -EINVAL;
3108 
3109 	ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3110 
3111 	return ret;
3112 }
3113 
3114 static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3115 {
3116 	struct smu_context *smu = handle;
3117 	int ret = 0;
3118 
3119 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3120 		return -EOPNOTSUPP;
3121 
3122 	if (!smu->ppt_funcs->set_fan_speed_pwm)
3123 		return -EOPNOTSUPP;
3124 
3125 	if (speed == U32_MAX)
3126 		return -EINVAL;
3127 
3128 	ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3129 	if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3130 		smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3131 		smu->user_dpm_profile.fan_speed_pwm = speed;
3132 
		/* Override any custom RPM setting as the two cannot co-exist */
3134 		smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3135 		smu->user_dpm_profile.fan_speed_rpm = 0;
3136 	}
3137 
3138 	return ret;
3139 }
3140 
3141 static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3142 {
3143 	struct smu_context *smu = handle;
3144 	int ret = 0;
3145 
3146 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3147 		return -EOPNOTSUPP;
3148 
3149 	if (!smu->ppt_funcs->get_fan_speed_rpm)
3150 		return -EOPNOTSUPP;
3151 
3152 	if (!speed)
3153 		return -EINVAL;
3154 
3155 	ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3156 
3157 	return ret;
3158 }
3159 
3160 static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3161 {
3162 	struct smu_context *smu = handle;
3163 
3164 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3165 		return -EOPNOTSUPP;
3166 
3167 	return smu_set_min_dcef_deep_sleep(smu, clk);
3168 }
3169 
3170 static int smu_get_clock_by_type_with_latency(void *handle,
3171 					      enum amd_pp_clock_type type,
3172 					      struct pp_clock_levels_with_latency *clocks)
3173 {
3174 	struct smu_context *smu = handle;
3175 	enum smu_clk_type clk_type;
3176 	int ret = 0;
3177 
3178 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3179 		return -EOPNOTSUPP;
3180 
3181 	if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3182 		switch (type) {
3183 		case amd_pp_sys_clock:
3184 			clk_type = SMU_GFXCLK;
3185 			break;
3186 		case amd_pp_mem_clock:
3187 			clk_type = SMU_MCLK;
3188 			break;
3189 		case amd_pp_dcef_clock:
3190 			clk_type = SMU_DCEFCLK;
3191 			break;
3192 		case amd_pp_disp_clock:
3193 			clk_type = SMU_DISPCLK;
3194 			break;
3195 		default:
3196 			dev_err(smu->adev->dev, "Invalid clock type!\n");
3197 			return -EINVAL;
3198 		}
3199 
3200 		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3201 	}
3202 
3203 	return ret;
3204 }
3205 
3206 static int smu_display_clock_voltage_request(void *handle,
3207 					     struct pp_display_clock_request *clock_req)
3208 {
3209 	struct smu_context *smu = handle;
3210 	int ret = 0;
3211 
3212 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3213 		return -EOPNOTSUPP;
3214 
3215 	if (smu->ppt_funcs->display_clock_voltage_request)
3216 		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3217 
3218 	return ret;
3219 }
3220 
3222 static int smu_display_disable_memory_clock_switch(void *handle,
3223 						   bool disable_memory_clock_switch)
3224 {
3225 	struct smu_context *smu = handle;
3226 	int ret = -EINVAL;
3227 
3228 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3229 		return -EOPNOTSUPP;
3230 
3231 	if (smu->ppt_funcs->display_disable_memory_clock_switch)
3232 		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3233 
3234 	return ret;
3235 }
3236 
3237 static int smu_set_xgmi_pstate(void *handle,
3238 			       uint32_t pstate)
3239 {
3240 	struct smu_context *smu = handle;
3241 	int ret = 0;
3242 
3243 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3244 		return -EOPNOTSUPP;
3245 
3246 	if (smu->ppt_funcs->set_xgmi_pstate)
3247 		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3248 
3249 	if (ret)
3250 		dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3251 
3252 	return ret;
3253 }
3254 
3255 static int smu_get_baco_capability(void *handle)
3256 {
3257 	struct smu_context *smu = handle;
3258 
3259 	if (!smu->pm_enabled)
3260 		return false;
3261 
3262 	if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3263 		return false;
3264 
3265 	return smu->ppt_funcs->get_bamaco_support(smu);
3266 }
3267 
3268 static int smu_baco_set_state(void *handle, int state)
3269 {
3270 	struct smu_context *smu = handle;
3271 	int ret = 0;
3272 
3273 	if (!smu->pm_enabled)
3274 		return -EOPNOTSUPP;
3275 
3276 	if (state == 0) {
3277 		if (smu->ppt_funcs->baco_exit)
3278 			ret = smu->ppt_funcs->baco_exit(smu);
3279 	} else if (state == 1) {
3280 		if (smu->ppt_funcs->baco_enter)
3281 			ret = smu->ppt_funcs->baco_enter(smu);
3282 	} else {
3283 		return -EINVAL;
3284 	}
3285 
3286 	if (ret)
3287 		dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
			state ? "enter" : "exit");
3289 
3290 	return ret;
3291 }
3292 
3293 bool smu_mode1_reset_is_support(struct smu_context *smu)
3294 {
3295 	bool ret = false;
3296 
3297 	if (!smu->pm_enabled)
3298 		return false;
3299 
3300 	if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3301 		ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3302 
3303 	return ret;
3304 }
3305 
3306 bool smu_mode2_reset_is_support(struct smu_context *smu)
3307 {
3308 	bool ret = false;
3309 
3310 	if (!smu->pm_enabled)
3311 		return false;
3312 
3313 	if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
3314 		ret = smu->ppt_funcs->mode2_reset_is_support(smu);
3315 
3316 	return ret;
3317 }
3318 
3319 int smu_mode1_reset(struct smu_context *smu)
3320 {
3321 	int ret = 0;
3322 
3323 	if (!smu->pm_enabled)
3324 		return -EOPNOTSUPP;
3325 
3326 	if (smu->ppt_funcs->mode1_reset)
3327 		ret = smu->ppt_funcs->mode1_reset(smu);
3328 
3329 	return ret;
3330 }
3331 
3332 static int smu_mode2_reset(void *handle)
3333 {
3334 	struct smu_context *smu = handle;
3335 	int ret = 0;
3336 
3337 	if (!smu->pm_enabled)
3338 		return -EOPNOTSUPP;
3339 
3340 	if (smu->ppt_funcs->mode2_reset)
3341 		ret = smu->ppt_funcs->mode2_reset(smu);
3342 
3343 	if (ret)
3344 		dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3345 
3346 	return ret;
3347 }
3348 
3349 static int smu_enable_gfx_features(void *handle)
3350 {
3351 	struct smu_context *smu = handle;
3352 	int ret = 0;
3353 
3354 	if (!smu->pm_enabled)
3355 		return -EOPNOTSUPP;
3356 
3357 	if (smu->ppt_funcs->enable_gfx_features)
3358 		ret = smu->ppt_funcs->enable_gfx_features(smu);
3359 
3360 	if (ret)
3361 		dev_err(smu->adev->dev, "enable gfx features failed!\n");
3362 
3363 	return ret;
3364 }
3365 
3366 static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3367 						struct pp_smu_nv_clock_table *max_clocks)
3368 {
3369 	struct smu_context *smu = handle;
3370 	int ret = 0;
3371 
3372 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3373 		return -EOPNOTSUPP;
3374 
3375 	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3376 		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3377 
3378 	return ret;
3379 }
3380 
3381 static int smu_get_uclk_dpm_states(void *handle,
3382 				   unsigned int *clock_values_in_khz,
3383 				   unsigned int *num_states)
3384 {
3385 	struct smu_context *smu = handle;
3386 	int ret = 0;
3387 
3388 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3389 		return -EOPNOTSUPP;
3390 
3391 	if (smu->ppt_funcs->get_uclk_dpm_states)
3392 		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3393 
3394 	return ret;
3395 }
3396 
3397 static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3398 {
3399 	struct smu_context *smu = handle;
3400 	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3401 
3402 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3403 		return -EOPNOTSUPP;
3404 
3405 	if (smu->ppt_funcs->get_current_power_state)
3406 		pm_state = smu->ppt_funcs->get_current_power_state(smu);
3407 
3408 	return pm_state;
3409 }
3410 
3411 static int smu_get_dpm_clock_table(void *handle,
3412 				   struct dpm_clocks *clock_table)
3413 {
3414 	struct smu_context *smu = handle;
3415 	int ret = 0;
3416 
3417 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3418 		return -EOPNOTSUPP;
3419 
3420 	if (smu->ppt_funcs->get_dpm_clock_table)
3421 		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3422 
3423 	return ret;
3424 }
3425 
3426 static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3427 {
3428 	struct smu_context *smu = handle;
3429 
3430 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3431 		return -EOPNOTSUPP;
3432 
3433 	if (!smu->ppt_funcs->get_gpu_metrics)
3434 		return -EOPNOTSUPP;
3435 
3436 	return smu->ppt_funcs->get_gpu_metrics(smu, table);
3437 }
3438 
3439 static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3440 				      size_t size)
3441 {
3442 	struct smu_context *smu = handle;
3443 
3444 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3445 		return -EOPNOTSUPP;
3446 
3447 	if (!smu->ppt_funcs->get_pm_metrics)
3448 		return -EOPNOTSUPP;
3449 
3450 	return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3451 }
3452 
3453 static int smu_enable_mgpu_fan_boost(void *handle)
3454 {
3455 	struct smu_context *smu = handle;
3456 	int ret = 0;
3457 
3458 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3459 		return -EOPNOTSUPP;
3460 
3461 	if (smu->ppt_funcs->enable_mgpu_fan_boost)
3462 		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3463 
3464 	return ret;
3465 }
3466 
3467 static int smu_gfx_state_change_set(void *handle,
3468 				    uint32_t state)
3469 {
3470 	struct smu_context *smu = handle;
3471 	int ret = 0;
3472 
3473 	if (smu->ppt_funcs->gfx_state_change_set)
3474 		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3475 
3476 	return ret;
3477 }
3478 
3479 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3480 {
3481 	int ret = 0;
3482 
3483 	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3484 		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3485 
3486 	return ret;
3487 }
3488 
3489 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3490 {
3491 	int ret = -EOPNOTSUPP;
3492 
3493 	if (smu->ppt_funcs &&
3494 		smu->ppt_funcs->get_ecc_info)
3495 		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3496 
3497 	return ret;
3499 }
3500 
3501 static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3502 {
3503 	struct smu_context *smu = handle;
3504 	struct smu_table_context *smu_table = &smu->smu_table;
3505 	struct smu_table *memory_pool = &smu_table->memory_pool;
3506 
3507 	if (!addr || !size)
3508 		return -EINVAL;
3509 
3510 	*addr = NULL;
3511 	*size = 0;
3512 	if (memory_pool->bo) {
3513 		*addr = memory_pool->cpu_addr;
3514 		*size = memory_pool->size;
3515 	}
3516 
3517 	return 0;
3518 }
3519 
3520 static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
3521 				 size_t *size)
3522 {
3523 	size_t offset = *size;
3524 	int level;
3525 
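	/* Emit one line per supported level; the active level gets a '*'. */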
3526 	for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
3527 		if (level == policy->current_level)
3528 			offset += sysfs_emit_at(sysbuf, offset,
3529 				"%d : %s*\n", level,
3530 				policy->desc->get_desc(policy, level));
3531 		else
3532 			offset += sysfs_emit_at(sysbuf, offset,
3533 				"%d : %s\n", level,
3534 				policy->desc->get_desc(policy, level));
3535 	}
3536 
3537 	*size = offset;
3538 }
3539 
3540 ssize_t smu_get_pm_policy_info(struct smu_context *smu,
3541 			       enum pp_pm_policy p_type, char *sysbuf)
3542 {
3543 	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3544 	struct smu_dpm_policy_ctxt *policy_ctxt;
3545 	struct smu_dpm_policy *dpm_policy;
3546 	size_t offset = 0;
3547 
3548 	policy_ctxt = dpm_ctxt->dpm_policies;
3549 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3550 	    !policy_ctxt->policy_mask)
3551 		return -EOPNOTSUPP;
3552 
3553 	if (p_type == PP_PM_POLICY_NONE)
3554 		return -EINVAL;
3555 
3556 	dpm_policy = smu_get_pm_policy(smu, p_type);
3557 	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
3558 		return -ENOENT;
3559 
3560 	if (!sysbuf)
3561 		return -EINVAL;
3562 
3563 	smu_print_dpm_policy(dpm_policy, sysbuf, &offset);
3564 
3565 	return offset;
3566 }
3567 
3568 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
3569 					 enum pp_pm_policy p_type)
3570 {
3571 	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3572 	struct smu_dpm_policy_ctxt *policy_ctxt;
3573 	int i;
3574 
3575 	policy_ctxt = dpm_ctxt->dpm_policies;
3576 	if (!policy_ctxt)
3577 		return NULL;
3578 
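	/*
	 * hweight32() counts the bits set in the policy mask, i.e. the
	 * number of valid entries in the policies array.
	 */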
3579 	for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
3580 		if (policy_ctxt->policies[i].policy_type == p_type)
3581 			return &policy_ctxt->policies[i];
3582 	}
3583 
3584 	return NULL;
3585 }
3586 
3587 int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
3588 		      int level)
3589 {
3590 	struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3591 	struct smu_dpm_policy *dpm_policy = NULL;
3592 	struct smu_dpm_policy_ctxt *policy_ctxt;
3593 	int ret = -EOPNOTSUPP;
3594 
3595 	policy_ctxt = dpm_ctxt->dpm_policies;
3596 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3597 	    !policy_ctxt->policy_mask)
3598 		return ret;
3599 
3600 	if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
3601 		return -EINVAL;
3602 
3603 	dpm_policy = smu_get_pm_policy(smu, p_type);
3604 
3605 	if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
3606 		return ret;
3607 
3608 	if (dpm_policy->current_level == level)
3609 		return 0;
3610 
3611 	ret = dpm_policy->set_policy(smu, level);
3612 
3613 	if (!ret)
3614 		dpm_policy->current_level = level;
3615 
3616 	return ret;
3617 }
3618 
3619 static const struct amd_pm_funcs swsmu_pm_funcs = {
3620 	/* export for sysfs */
3621 	.set_fan_control_mode    = smu_set_fan_control_mode,
3622 	.get_fan_control_mode    = smu_get_fan_control_mode,
3623 	.set_fan_speed_pwm   = smu_set_fan_speed_pwm,
3624 	.get_fan_speed_pwm   = smu_get_fan_speed_pwm,
3625 	.force_clock_level       = smu_force_ppclk_levels,
3626 	.print_clock_levels      = smu_print_ppclk_levels,
3627 	.emit_clock_levels       = smu_emit_ppclk_levels,
3628 	.force_performance_level = smu_force_performance_level,
3629 	.read_sensor             = smu_read_sensor,
3630 	.get_apu_thermal_limit       = smu_get_apu_thermal_limit,
3631 	.set_apu_thermal_limit       = smu_set_apu_thermal_limit,
3632 	.get_performance_level   = smu_get_performance_level,
3633 	.get_current_power_state = smu_get_current_power_state,
3634 	.get_fan_speed_rpm       = smu_get_fan_speed_rpm,
3635 	.set_fan_speed_rpm       = smu_set_fan_speed_rpm,
3636 	.get_pp_num_states       = smu_get_power_num_states,
3637 	.get_pp_table            = smu_sys_get_pp_table,
3638 	.set_pp_table            = smu_sys_set_pp_table,
3639 	.switch_power_profile    = smu_switch_power_profile,
3640 	/* export to amdgpu */
3641 	.dispatch_tasks          = smu_handle_dpm_task,
3642 	.load_firmware           = smu_load_microcode,
3643 	.set_powergating_by_smu  = smu_dpm_set_power_gate,
3644 	.set_power_limit         = smu_set_power_limit,
3645 	.get_power_limit         = smu_get_power_limit,
3646 	.get_power_profile_mode  = smu_get_power_profile_mode,
3647 	.set_power_profile_mode  = smu_set_power_profile_mode,
3648 	.odn_edit_dpm_table      = smu_od_edit_dpm_table,
3649 	.set_mp1_state           = smu_set_mp1_state,
3650 	.gfx_state_change_set    = smu_gfx_state_change_set,
3651 	/* export to DC */
3652 	.get_sclk                         = smu_get_sclk,
3653 	.get_mclk                         = smu_get_mclk,
3654 	.display_configuration_change     = smu_display_configuration_change,
3655 	.get_clock_by_type_with_latency   = smu_get_clock_by_type_with_latency,
3656 	.display_clock_voltage_request    = smu_display_clock_voltage_request,
3657 	.enable_mgpu_fan_boost            = smu_enable_mgpu_fan_boost,
3658 	.set_active_display_count         = smu_set_display_count,
3659 	.set_min_deep_sleep_dcefclk       = smu_set_deep_sleep_dcefclk,
3660 	.get_asic_baco_capability         = smu_get_baco_capability,
3661 	.set_asic_baco_state              = smu_baco_set_state,
3662 	.get_ppfeature_status             = smu_sys_get_pp_feature_mask,
3663 	.set_ppfeature_status             = smu_sys_set_pp_feature_mask,
3664 	.asic_reset_mode_2                = smu_mode2_reset,
3665 	.asic_reset_enable_gfx_features   = smu_enable_gfx_features,
3666 	.set_df_cstate                    = smu_set_df_cstate,
3667 	.set_xgmi_pstate                  = smu_set_xgmi_pstate,
3668 	.get_gpu_metrics                  = smu_sys_get_gpu_metrics,
3669 	.get_pm_metrics                   = smu_sys_get_pm_metrics,
3670 	.set_watermarks_for_clock_ranges     = smu_set_watermarks_for_clock_ranges,
3671 	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3672 	.get_max_sustainable_clocks_by_dc    = smu_get_max_sustainable_clocks_by_dc,
3673 	.get_uclk_dpm_states              = smu_get_uclk_dpm_states,
3674 	.get_dpm_clock_table              = smu_get_dpm_clock_table,
3675 	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
3676 };
3677 
3678 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3679 		       uint64_t event_arg)
3680 {
3681 	int ret = -EINVAL;
3682 
3683 	if (smu->ppt_funcs->wait_for_event)
3684 		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3685 
3686 	return ret;
3687 }
3688 
3689 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3690 {
3692 	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3693 		return -EOPNOTSUPP;
3694 
	/* Confirm the allocated buffer is of the correct size */
3696 	if (size != smu->stb_context.stb_buf_size)
3697 		return -EINVAL;
3698 
	/*
	 * No need to lock the smu mutex as we access STB directly through MMIO
	 * and do not go through the SMU messaging route (for now at least).
	 * Register access relies on the implementation's internal locking.
	 */
3704 	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3705 }
3706 
3707 #if defined(CONFIG_DEBUG_FS)
3708 
3709 static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3710 {
3711 	struct amdgpu_device *adev = filp->f_inode->i_private;
3712 	struct smu_context *smu = adev->powerplay.pp_handle;
3713 	unsigned char *buf;
3714 	int r;
3715 
3716 	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3717 	if (!buf)
3718 		return -ENOMEM;
3719 
3720 	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3721 	if (r)
3722 		goto out;
3723 
3724 	filp->private_data = buf;
3725 
3726 	return 0;
3727 
3728 out:
3729 	kvfree(buf);
3730 	return r;
3731 }
3732 
3733 static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3734 				loff_t *pos)
3735 {
3736 	struct amdgpu_device *adev = filp->f_inode->i_private;
3737 	struct smu_context *smu = adev->powerplay.pp_handle;
3738 
3740 	if (!filp->private_data)
3741 		return -EINVAL;
3742 
3743 	return simple_read_from_buffer(buf,
3744 				       size,
3745 				       pos, filp->private_data,
3746 				       smu->stb_context.stb_buf_size);
3747 }
3748 
3749 static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3750 {
3751 	kvfree(filp->private_data);
3752 	filp->private_data = NULL;
3753 
3754 	return 0;
3755 }
3756 
/*
 * We have to define not only the read method but also open and release,
 * because .read takes up to PAGE_SIZE of data each time and so is invoked
 * multiple times. We allocate the STB buffer in .open and release it in
 * .release.
 */
3764 static const struct file_operations smu_stb_debugfs_fops = {
3765 	.owner = THIS_MODULE,
3766 	.open = smu_stb_debugfs_open,
3767 	.read = smu_stb_debugfs_read,
3768 	.release = smu_stb_debugfs_release,
3769 	.llseek = default_llseek,
3770 };
3771 
3772 #endif
3773 
3774 void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
3775 {
3776 #if defined(CONFIG_DEBUG_FS)
3777 
3778 	struct smu_context *smu = adev->powerplay.pp_handle;
3779 
	if (!smu || !smu->stb_context.stb_buf_size)
3781 		return;
3782 
3783 	debugfs_create_file_size("amdgpu_smu_stb_dump",
3784 			    S_IRUSR,
3785 			    adev_to_drm(adev)->primary->debugfs_root,
3786 			    adev,
3787 			    &smu_stb_debugfs_fops,
3788 			    smu->stb_context.stb_buf_size);
3789 #endif
3790 }
3791 
3792 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3793 {
3794 	int ret = 0;
3795 
3796 	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3797 		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3798 
3799 	return ret;
3800 }
3801 
3802 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3803 {
3804 	int ret = 0;
3805 
3806 	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3807 		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3808 
3809 	return ret;
3810 }
3811 
3812 int smu_send_rma_reason(struct smu_context *smu)
3813 {
3814 	int ret = 0;
3815 
3816 	if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
3817 		ret = smu->ppt_funcs->send_rma_reason(smu);
3818 
3819 	return ret;
3820 }
3821