xref: /linux/drivers/gpu/drm/amd/pm/amdgpu_pm.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Rafał Miłecki <zajec5@gmail.com>
23  *          Alex Deucher <alexdeucher@gmail.com>
24  */
25 
26 #include "amdgpu.h"
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
30 #include "atom.h"
31 #include <linux/pci.h>
32 #include <linux/hwmon.h>
33 #include <linux/hwmon-sysfs.h>
34 #include <linux/nospec.h>
35 #include <linux/pm_runtime.h>
36 #include <asm/processor.h>
37 
38 #define MAX_NUM_OF_FEATURES_PER_SUBSET		8
39 #define MAX_NUM_OF_SUBSETS			8
40 
41 struct od_attribute {
42 	struct kobj_attribute	attribute;
43 	struct list_head	entry;
44 };
45 
46 struct od_kobj {
47 	struct kobject		kobj;
48 	struct list_head	entry;
49 	struct list_head	attribute;
50 	void			*priv;
51 };
52 
53 struct od_feature_ops {
54 	umode_t (*is_visible)(struct amdgpu_device *adev);
55 	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
56 			char *buf);
57 	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
58 			 const char *buf, size_t count);
59 };
60 
61 struct od_feature_item {
62 	const char		*name;
63 	struct od_feature_ops	ops;
64 };
65 
66 struct od_feature_container {
67 	char				*name;
68 	struct od_feature_ops		ops;
69 	struct od_feature_item		sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
70 };
71 
72 struct od_feature_set {
73 	struct od_feature_container	containers[MAX_NUM_OF_SUBSETS];
74 };
75 
76 static const struct hwmon_temp_label {
77 	enum PP_HWMON_TEMP channel;
78 	const char *label;
79 } temp_label[] = {
80 	{PP_TEMP_EDGE, "edge"},
81 	{PP_TEMP_JUNCTION, "junction"},
82 	{PP_TEMP_MEM, "mem"},
83 };
84 
85 const char * const amdgpu_pp_profile_name[] = {
86 	"BOOTUP_DEFAULT",
87 	"3D_FULL_SCREEN",
88 	"POWER_SAVING",
89 	"VIDEO",
90 	"VR",
91 	"COMPUTE",
92 	"CUSTOM",
93 	"WINDOW_3D",
94 	"CAPPED",
95 	"UNCAPPED",
96 };
97 
98 /**
99  * DOC: power_dpm_state
100  *
101  * The power_dpm_state file is a legacy interface and is only provided for
102  * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
103  * certain power-related parameters.  The file power_dpm_state is used for this.
104  * It accepts the following arguments:
105  *
106  * - battery
107  *
108  * - balanced
109  *
110  * - performance
111  *
112  * battery
113  *
114  * On older GPUs, the vbios provided a special power state for battery
115  * operation.  Selecting battery switched to this state.  This is no
116  * longer provided on newer GPUs so the option does nothing in that case.
117  *
118  * balanced
119  *
120  * On older GPUs, the vbios provided a special power state for balanced
121  * operation.  Selecting balanced switched to this state.  This is no
122  * longer provided on newer GPUs so the option does nothing in that case.
123  *
124  * performance
125  *
126  * On older GPUs, the vbios provided a special power state for performance
127  * operation.  Selecting performance switched to this state.  This is no
128  * longer provided on newer GPUs so the option does nothing in that case.
129  *
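 * For example, run from the GPU's sysfs device directory (illustrative;
 * e.g. /sys/class/drm/card0/device, where the card index varies by system):
 *
 * .. code-block:: bash
 *
 *	cat power_dpm_state
 *	echo balanced > power_dpm_state
 *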
130  */
131 
132 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
133 					  struct device_attribute *attr,
134 					  char *buf)
135 {
136 	struct drm_device *ddev = dev_get_drvdata(dev);
137 	struct amdgpu_device *adev = drm_to_adev(ddev);
138 	enum amd_pm_state_type pm;
139 	int ret;
140 
141 	if (amdgpu_in_reset(adev))
142 		return -EPERM;
143 	if (adev->in_suspend && !adev->in_runpm)
144 		return -EPERM;
145 
146 	ret = pm_runtime_get_sync(ddev->dev);
147 	if (ret < 0) {
148 		pm_runtime_put_autosuspend(ddev->dev);
149 		return ret;
150 	}
151 
152 	amdgpu_dpm_get_current_power_state(adev, &pm);
153 
154 	pm_runtime_mark_last_busy(ddev->dev);
155 	pm_runtime_put_autosuspend(ddev->dev);
156 
157 	return sysfs_emit(buf, "%s\n",
158 			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
159 			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
160 }
161 
162 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
163 					  struct device_attribute *attr,
164 					  const char *buf,
165 					  size_t count)
166 {
167 	struct drm_device *ddev = dev_get_drvdata(dev);
168 	struct amdgpu_device *adev = drm_to_adev(ddev);
169 	enum amd_pm_state_type  state;
170 	int ret;
171 
172 	if (amdgpu_in_reset(adev))
173 		return -EPERM;
174 	if (adev->in_suspend && !adev->in_runpm)
175 		return -EPERM;
176 
177 	if (strncmp("battery", buf, strlen("battery")) == 0)
178 		state = POWER_STATE_TYPE_BATTERY;
179 	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
180 		state = POWER_STATE_TYPE_BALANCED;
181 	else if (strncmp("performance", buf, strlen("performance")) == 0)
182 		state = POWER_STATE_TYPE_PERFORMANCE;
183 	else
184 		return -EINVAL;
185 
186 	ret = pm_runtime_get_sync(ddev->dev);
187 	if (ret < 0) {
188 		pm_runtime_put_autosuspend(ddev->dev);
189 		return ret;
190 	}
191 
192 	amdgpu_dpm_set_power_state(adev, state);
193 
194 	pm_runtime_mark_last_busy(ddev->dev);
195 	pm_runtime_put_autosuspend(ddev->dev);
196 
197 	return count;
198 }
199 
200 
201 /**
202  * DOC: power_dpm_force_performance_level
203  *
204  * The amdgpu driver provides a sysfs API for adjusting certain
205  * power-related parameters.  The file power_dpm_force_performance_level is
206  * used for this.  It accepts the following arguments:
207  *
208  * - auto
209  *
210  * - low
211  *
212  * - high
213  *
214  * - manual
215  *
216  * - profile_standard
217  *
218  * - profile_min_sclk
219  *
220  * - profile_min_mclk
221  *
222  * - profile_peak
223  *
224  * auto
225  *
226  * When auto is selected, the driver will attempt to dynamically select
227  * the optimal power profile for the current conditions.
228  *
229  * low
230  *
231  * When low is selected, the clocks are forced to the lowest power state.
232  *
233  * high
234  *
235  * When high is selected, the clocks are forced to the highest power state.
236  *
237  * manual
238  *
239  * When manual is selected, the user can manually adjust which power states
240  * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
241  * and pp_dpm_pcie files and adjust the power state transition heuristics
242  * via the pp_power_profile_mode sysfs file.
243  *
244  * profile_standard
245  * profile_min_sclk
246  * profile_min_mclk
247  * profile_peak
248  *
249  * When the profiling modes are selected, clock and power gating are
250  * disabled and the clocks are set for different profiling cases. This
251  * mode is recommended for profiling specific workloads where you do
252  * not want clock or power gating, or clock fluctuations, to interfere
253  * with your results. profile_standard sets the clocks to a fixed clock
254  * level which varies from asic to asic.  profile_min_sclk forces the sclk
255  * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
256  * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
257  *
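 * For example, to switch to manual control and later back to automatic
 * selection (illustrative; run from the device's sysfs directory):
 *
 * .. code-block:: bash
 *
 *	echo manual > power_dpm_force_performance_level
 *	echo auto > power_dpm_force_performance_level
 *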
258  */
259 
260 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
261 							    struct device_attribute *attr,
262 							    char *buf)
263 {
264 	struct drm_device *ddev = dev_get_drvdata(dev);
265 	struct amdgpu_device *adev = drm_to_adev(ddev);
266 	enum amd_dpm_forced_level level = 0xff;
267 	int ret;
268 
269 	if (amdgpu_in_reset(adev))
270 		return -EPERM;
271 	if (adev->in_suspend && !adev->in_runpm)
272 		return -EPERM;
273 
274 	ret = pm_runtime_get_sync(ddev->dev);
275 	if (ret < 0) {
276 		pm_runtime_put_autosuspend(ddev->dev);
277 		return ret;
278 	}
279 
280 	level = amdgpu_dpm_get_performance_level(adev);
281 
282 	pm_runtime_mark_last_busy(ddev->dev);
283 	pm_runtime_put_autosuspend(ddev->dev);
284 
285 	return sysfs_emit(buf, "%s\n",
286 			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
287 			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
288 			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
289 			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
290 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
291 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
292 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
293 			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
294 			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
295 			  "unknown");
296 }
297 
298 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
299 							    struct device_attribute *attr,
300 							    const char *buf,
301 							    size_t count)
302 {
303 	struct drm_device *ddev = dev_get_drvdata(dev);
304 	struct amdgpu_device *adev = drm_to_adev(ddev);
305 	enum amd_dpm_forced_level level;
306 	int ret = 0;
307 
308 	if (amdgpu_in_reset(adev))
309 		return -EPERM;
310 	if (adev->in_suspend && !adev->in_runpm)
311 		return -EPERM;
312 
313 	if (strncmp("low", buf, strlen("low")) == 0) {
314 		level = AMD_DPM_FORCED_LEVEL_LOW;
315 	} else if (strncmp("high", buf, strlen("high")) == 0) {
316 		level = AMD_DPM_FORCED_LEVEL_HIGH;
317 	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
318 		level = AMD_DPM_FORCED_LEVEL_AUTO;
319 	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
320 		level = AMD_DPM_FORCED_LEVEL_MANUAL;
321 	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
322 		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
323 	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
324 		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
325 	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
326 		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
327 	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
328 		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
329 	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
330 		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
331 	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
332 		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
333 	}  else {
334 		return -EINVAL;
335 	}
336 
337 	ret = pm_runtime_get_sync(ddev->dev);
338 	if (ret < 0) {
339 		pm_runtime_put_autosuspend(ddev->dev);
340 		return ret;
341 	}
342 
343 	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
344 	if (amdgpu_dpm_force_performance_level(adev, level)) {
345 		pm_runtime_mark_last_busy(ddev->dev);
346 		pm_runtime_put_autosuspend(ddev->dev);
347 		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
348 		return -EINVAL;
349 	}
350 	/* override whatever a user ctx may have set */
351 	adev->pm.stable_pstate_ctx = NULL;
352 	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
353 
354 	pm_runtime_mark_last_busy(ddev->dev);
355 	pm_runtime_put_autosuspend(ddev->dev);
356 
357 	return count;
358 }
359 
360 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
361 		struct device_attribute *attr,
362 		char *buf)
363 {
364 	struct drm_device *ddev = dev_get_drvdata(dev);
365 	struct amdgpu_device *adev = drm_to_adev(ddev);
366 	struct pp_states_info data;
367 	uint32_t i;
368 	int buf_len, ret;
369 
370 	if (amdgpu_in_reset(adev))
371 		return -EPERM;
372 	if (adev->in_suspend && !adev->in_runpm)
373 		return -EPERM;
374 
375 	ret = pm_runtime_get_sync(ddev->dev);
376 	if (ret < 0) {
377 		pm_runtime_put_autosuspend(ddev->dev);
378 		return ret;
379 	}
380 
381 	if (amdgpu_dpm_get_pp_num_states(adev, &data))
382 		memset(&data, 0, sizeof(data));
383 
384 	pm_runtime_mark_last_busy(ddev->dev);
385 	pm_runtime_put_autosuspend(ddev->dev);
386 
387 	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
388 	for (i = 0; i < data.nums; i++)
389 		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
390 				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
391 				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
392 				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
393 				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
394 
395 	return buf_len;
396 }
397 
398 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
399 		struct device_attribute *attr,
400 		char *buf)
401 {
402 	struct drm_device *ddev = dev_get_drvdata(dev);
403 	struct amdgpu_device *adev = drm_to_adev(ddev);
404 	struct pp_states_info data = {0};
405 	enum amd_pm_state_type pm = 0;
406 	int i = 0, ret = 0;
407 
408 	if (amdgpu_in_reset(adev))
409 		return -EPERM;
410 	if (adev->in_suspend && !adev->in_runpm)
411 		return -EPERM;
412 
413 	ret = pm_runtime_get_sync(ddev->dev);
414 	if (ret < 0) {
415 		pm_runtime_put_autosuspend(ddev->dev);
416 		return ret;
417 	}
418 
419 	amdgpu_dpm_get_current_power_state(adev, &pm);
420 
421 	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
422 
423 	pm_runtime_mark_last_busy(ddev->dev);
424 	pm_runtime_put_autosuspend(ddev->dev);
425 
426 	if (ret)
427 		return ret;
428 
429 	for (i = 0; i < data.nums; i++) {
430 		if (pm == data.states[i])
431 			break;
432 	}
433 
434 	if (i == data.nums)
435 		i = -EINVAL;
436 
437 	return sysfs_emit(buf, "%d\n", i);
438 }
439 
440 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
441 		struct device_attribute *attr,
442 		char *buf)
443 {
444 	struct drm_device *ddev = dev_get_drvdata(dev);
445 	struct amdgpu_device *adev = drm_to_adev(ddev);
446 
447 	if (amdgpu_in_reset(adev))
448 		return -EPERM;
449 	if (adev->in_suspend && !adev->in_runpm)
450 		return -EPERM;
451 
452 	if (adev->pm.pp_force_state_enabled)
453 		return amdgpu_get_pp_cur_state(dev, attr, buf);
454 	else
455 		return sysfs_emit(buf, "\n");
456 }
457 
458 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
459 		struct device_attribute *attr,
460 		const char *buf,
461 		size_t count)
462 {
463 	struct drm_device *ddev = dev_get_drvdata(dev);
464 	struct amdgpu_device *adev = drm_to_adev(ddev);
465 	enum amd_pm_state_type state = 0;
466 	struct pp_states_info data;
467 	unsigned long idx;
468 	int ret;
469 
470 	if (amdgpu_in_reset(adev))
471 		return -EPERM;
472 	if (adev->in_suspend && !adev->in_runpm)
473 		return -EPERM;
474 
475 	adev->pm.pp_force_state_enabled = false;
476 
477 	if (strlen(buf) == 1)
478 		return count;
479 
480 	ret = kstrtoul(buf, 0, &idx);
481 	if (ret || idx >= ARRAY_SIZE(data.states))
482 		return -EINVAL;
483 
484 	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
485 
486 	ret = pm_runtime_get_sync(ddev->dev);
487 	if (ret < 0) {
488 		pm_runtime_put_autosuspend(ddev->dev);
489 		return ret;
490 	}
491 
492 	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
493 	if (ret)
494 		goto err_out;
495 
496 	state = data.states[idx];
497 
498 	/* only set user selected power states */
499 	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
500 	    state != POWER_STATE_TYPE_DEFAULT) {
501 		ret = amdgpu_dpm_dispatch_task(adev,
502 				AMD_PP_TASK_ENABLE_USER_STATE, &state);
503 		if (ret)
504 			goto err_out;
505 
506 		adev->pm.pp_force_state_enabled = true;
507 	}
508 
509 	pm_runtime_mark_last_busy(ddev->dev);
510 	pm_runtime_put_autosuspend(ddev->dev);
511 
512 	return count;
513 
514 err_out:
515 	pm_runtime_mark_last_busy(ddev->dev);
516 	pm_runtime_put_autosuspend(ddev->dev);
517 	return ret;
518 }
519 
520 /**
521  * DOC: pp_table
522  *
523  * The amdgpu driver provides a sysfs API for uploading new powerplay
524  * tables.  The file pp_table is used for this.  Reading the file
525  * will dump the current power play table.  Writing to the file
526  * will attempt to upload a new powerplay table and re-initialize
527  * powerplay using that new table.
528  *
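 * For example (illustrative; the table is a binary blob, so the uploaded
 * file is assumed to be a previously saved or tool-generated table):
 *
 * .. code-block:: bash
 *
 *	cat pp_table > /tmp/pp_table.bak
 *	cat /tmp/pp_table.new > pp_table
 *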
529  */
530 
531 static ssize_t amdgpu_get_pp_table(struct device *dev,
532 		struct device_attribute *attr,
533 		char *buf)
534 {
535 	struct drm_device *ddev = dev_get_drvdata(dev);
536 	struct amdgpu_device *adev = drm_to_adev(ddev);
537 	char *table = NULL;
538 	int size, ret;
539 
540 	if (amdgpu_in_reset(adev))
541 		return -EPERM;
542 	if (adev->in_suspend && !adev->in_runpm)
543 		return -EPERM;
544 
545 	ret = pm_runtime_get_sync(ddev->dev);
546 	if (ret < 0) {
547 		pm_runtime_put_autosuspend(ddev->dev);
548 		return ret;
549 	}
550 
551 	size = amdgpu_dpm_get_pp_table(adev, &table);
552 
553 	pm_runtime_mark_last_busy(ddev->dev);
554 	pm_runtime_put_autosuspend(ddev->dev);
555 
556 	if (size <= 0)
557 		return size;
558 
559 	if (size >= PAGE_SIZE)
560 		size = PAGE_SIZE - 1;
561 
562 	memcpy(buf, table, size);
563 
564 	return size;
565 }
566 
567 static ssize_t amdgpu_set_pp_table(struct device *dev,
568 		struct device_attribute *attr,
569 		const char *buf,
570 		size_t count)
571 {
572 	struct drm_device *ddev = dev_get_drvdata(dev);
573 	struct amdgpu_device *adev = drm_to_adev(ddev);
574 	int ret = 0;
575 
576 	if (amdgpu_in_reset(adev))
577 		return -EPERM;
578 	if (adev->in_suspend && !adev->in_runpm)
579 		return -EPERM;
580 
581 	ret = pm_runtime_get_sync(ddev->dev);
582 	if (ret < 0) {
583 		pm_runtime_put_autosuspend(ddev->dev);
584 		return ret;
585 	}
586 
587 	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
588 
589 	pm_runtime_mark_last_busy(ddev->dev);
590 	pm_runtime_put_autosuspend(ddev->dev);
591 
592 	if (ret)
593 		return ret;
594 
595 	return count;
596 }
597 
598 /**
599  * DOC: pp_od_clk_voltage
600  *
601  * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
602  * in each power level within a power state.  The file pp_od_clk_voltage is
603  * used for this.
604  *
605  * Note that the actual memory controller clock rate is exposed, not
606  * the effective memory clock of the DRAMs. To translate it, use the
607  * following formula:
608  *
609  * Clock conversion (MHz):
610  *
611  * HBM: effective_memory_clock = memory_controller_clock * 1
612  *
613  * G5: effective_memory_clock = memory_controller_clock * 1
614  *
615  * G6: effective_memory_clock = memory_controller_clock * 2
616  *
617  * DRAM data rate (MT/s):
618  *
619  * HBM: effective_memory_clock * 2 = data_rate
620  *
621  * G5: effective_memory_clock * 4 = data_rate
622  *
623  * G6: effective_memory_clock * 8 = data_rate
624  *
625  * Bandwidth (MB/s):
626  *
627  * data_rate * vram_bit_width / 8 = memory_bandwidth
628  *
629  * Some examples:
630  *
631  * G5 on RX460:
632  *
633  * memory_controller_clock = 1750 MHz
634  *
635  * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
636  *
637  * data_rate = 1750 * 4 = 7000 MT/s
638  *
639  * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
640  *
641  * G6 on RX5700:
642  *
643  * memory_controller_clock = 875 MHz
644  *
645  * effective_memory_clock = 875 MHz * 2 = 1750 MHz
646  *
647  * data_rate = 1750 * 8 = 14000 MT/s
648  *
649  * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
650  *
651  * < For Vega10 and previous ASICs >
652  *
653  * Reading the file will display:
654  *
655  * - a list of engine clock levels and voltages labeled OD_SCLK
656  *
657  * - a list of memory clock levels and voltages labeled OD_MCLK
658  *
659  * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
660  *
661  * To manually adjust these settings, first select manual using
662  * power_dpm_force_performance_level. Enter a new value for each
663  * level by writing a string that contains "s/m level clock voltage" to
664  * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
665  * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
666  * 810 mV.  When you have edited all of the states as needed, write
667  * "c" (commit) to the file to commit your changes.  If you want to reset to the
668  * default power levels, write "r" (reset) to the file to reset them.
669  *
670  *
671  * < For Vega20 and newer ASICs >
672  *
673  * Reading the file will display:
674  *
675  * - minimum and maximum engine clock labeled OD_SCLK
676  *
677  * - minimum (not available for Vega20 and Navi1x) and maximum memory
678  *   clock labeled OD_MCLK
679  *
680  * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
681  *   They can be used to calibrate the sclk voltage curve. This is
682  *   available for Vega20 and NV1X.
683  *
684  * - voltage offset (in mV) applied to the target voltage calculation.
685  *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
686  *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
687  *   voltage calculation can be illustrated by "voltage = voltage
688  *   calculated from v/f curve + overdrive vddgfx offset"
689  *
690  * - a list of valid ranges for sclk, mclk, voltage curve points
691  *   or voltage offset labeled OD_RANGE
692  *
693  * < For APUs >
694  *
695  * Reading the file will display:
696  *
697  * - minimum and maximum engine clock labeled OD_SCLK
698  *
699  * - a list of valid ranges for sclk labeled OD_RANGE
700  *
701  * < For VanGogh >
702  *
703  * Reading the file will display:
704  *
705  * - minimum and maximum engine clock labeled OD_SCLK
706  * - minimum and maximum core clocks labeled OD_CCLK
707  *
708  * - a list of valid ranges for sclk and cclk labeled OD_RANGE
709  *
710  * To manually adjust these settings:
711  *
712  * - First select manual using power_dpm_force_performance_level
713  *
714  * - For clock frequency setting, enter a new value by writing a
715  *   string that contains "s/m index clock" to the file. The index
716  *   should be 0 to set the minimum clock and 1 to set the maximum
717  *   clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
718  *   "m 1 800" will update maximum mclk to be 800 MHz. For core
719  *   clocks on VanGogh, the string contains "p core index clock".
720  *   E.g., "p 2 0 800" would set the minimum core clock on core
721  *   2 to 800 MHz.
722  *
723  *   For the sclk voltage curve supported by Vega20 and NV1X, enter the
724  *   new values by writing a string that contains "vc point clock voltage"
725  *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
726  *   600" will update point 1 with the clock set to 300 MHz and the
727  *   voltage to 600 mV. "vc 2 1000 1000" will update point 3 with the
728  *   clock set to 1000 MHz and the voltage to 1000 mV.
729  *
730  *   For the voltage offset supported by Sienna Cichlid, Navy Flounder,
731  *   Dimgrey Cavefish and some later SMU13 ASICs, enter the new value by
732  *   writing a string that contains "vo offset". E.g., "vo -10" will update
733  *   the extra voltage offset applied to the whole v/f curve line to -10 mV.
734  *
735  * - When you have edited all of the states as needed, write "c" (commit)
736  *   to the file to commit your changes
737  *
738  * - If you want to reset to the default power levels, write "r" (reset)
739  *   to the file to reset them
740  *
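 * A minimal example sequence for Vega10 and previous ASICs (illustrative
 * values; valid clocks and voltages depend on the OD_RANGE reported by the
 * ASIC):
 *
 * .. code-block:: bash
 *
 *	echo manual > power_dpm_force_performance_level
 *	echo "s 1 500 820" > pp_od_clk_voltage
 *	echo "m 0 350 810" > pp_od_clk_voltage
 *	echo "c" > pp_od_clk_voltage
 *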
741  */
742 
743 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
744 		struct device_attribute *attr,
745 		const char *buf,
746 		size_t count)
747 {
748 	struct drm_device *ddev = dev_get_drvdata(dev);
749 	struct amdgpu_device *adev = drm_to_adev(ddev);
750 	int ret;
751 	uint32_t parameter_size = 0;
752 	long parameter[64];
753 	char buf_cpy[128];
754 	char *tmp_str;
755 	char *sub_str;
756 	const char delimiter[3] = {' ', '\n', '\0'};
757 	uint32_t type;
758 
759 	if (amdgpu_in_reset(adev))
760 		return -EPERM;
761 	if (adev->in_suspend && !adev->in_runpm)
762 		return -EPERM;
763 
764 	if (count > 127 || count == 0)
765 		return -EINVAL;
766 
767 	if (*buf == 's')
768 		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
769 	else if (*buf == 'p')
770 		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
771 	else if (*buf == 'm')
772 		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
773 	else if (*buf == 'r')
774 		type = PP_OD_RESTORE_DEFAULT_TABLE;
775 	else if (*buf == 'c')
776 		type = PP_OD_COMMIT_DPM_TABLE;
777 	else if (!strncmp(buf, "vc", 2))
778 		type = PP_OD_EDIT_VDDC_CURVE;
779 	else if (!strncmp(buf, "vo", 2))
780 		type = PP_OD_EDIT_VDDGFX_OFFSET;
781 	else
782 		return -EINVAL;
783 
784 	memcpy(buf_cpy, buf, count);
785 	buf_cpy[count] = 0;
786 
787 	tmp_str = buf_cpy;
788 
789 	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
790 	     (type == PP_OD_EDIT_VDDGFX_OFFSET))
791 		tmp_str++;
792 	while (isspace(*++tmp_str));
793 
794 	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
795 		if (strlen(sub_str) == 0)
796 			continue;
797 		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
798 		if (ret)
799 			return -EINVAL;
800 		parameter_size++;
801 
802 		if (!tmp_str)
803 			break;
804 
805 		while (isspace(*tmp_str))
806 			tmp_str++;
807 	}
808 
809 	ret = pm_runtime_get_sync(ddev->dev);
810 	if (ret < 0) {
811 		pm_runtime_put_autosuspend(ddev->dev);
812 		return ret;
813 	}
814 
815 	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
816 					      type,
817 					      parameter,
818 					      parameter_size))
819 		goto err_out;
820 
821 	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
822 					  parameter, parameter_size))
823 		goto err_out;
824 
825 	if (type == PP_OD_COMMIT_DPM_TABLE) {
826 		if (amdgpu_dpm_dispatch_task(adev,
827 					     AMD_PP_TASK_READJUST_POWER_STATE,
828 					     NULL))
829 			goto err_out;
830 	}
831 
832 	pm_runtime_mark_last_busy(ddev->dev);
833 	pm_runtime_put_autosuspend(ddev->dev);
834 
835 	return count;
836 
837 err_out:
838 	pm_runtime_mark_last_busy(ddev->dev);
839 	pm_runtime_put_autosuspend(ddev->dev);
840 	return -EINVAL;
841 }
842 
843 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
844 		struct device_attribute *attr,
845 		char *buf)
846 {
847 	struct drm_device *ddev = dev_get_drvdata(dev);
848 	struct amdgpu_device *adev = drm_to_adev(ddev);
849 	int size = 0;
850 	int ret;
851 	enum pp_clock_type od_clocks[6] = {
852 		OD_SCLK,
853 		OD_MCLK,
854 		OD_VDDC_CURVE,
855 		OD_RANGE,
856 		OD_VDDGFX_OFFSET,
857 		OD_CCLK,
858 	};
859 	uint clk_index;
860 
861 	if (amdgpu_in_reset(adev))
862 		return -EPERM;
863 	if (adev->in_suspend && !adev->in_runpm)
864 		return -EPERM;
865 
866 	ret = pm_runtime_get_sync(ddev->dev);
867 	if (ret < 0) {
868 		pm_runtime_put_autosuspend(ddev->dev);
869 		return ret;
870 	}
871 
872 	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
873 		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
874 		if (ret)
875 			break;
876 	}
877 	if (ret == -ENOENT) {
878 		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
879 		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
880 		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
881 		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
882 		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
883 		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
884 	}
885 
886 	if (size == 0)
887 		size = sysfs_emit(buf, "\n");
888 
889 	pm_runtime_mark_last_busy(ddev->dev);
890 	pm_runtime_put_autosuspend(ddev->dev);
891 
892 	return size;
893 }
894 
895 /**
896  * DOC: pp_features
897  *
898  * The amdgpu driver provides a sysfs API for adjusting which powerplay
899  * features are enabled. The file pp_features is used for this. It is
900  * only available for Vega10 and later dGPUs.
901  *
902  * Reading back the file will show you the following:
903  * - The current ppfeature mask
904  * - A list of all supported powerplay features with their names,
905  *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
906  *
907  * To manually enable or disable a specific feature, set or clear the
908  * corresponding bit in the original ppfeature mask and write the new
909  * mask back to the file.
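 *
 * For example (illustrative; the mask below is a made-up placeholder, so
 * read the file first to see the real bit assignments for your ASIC):
 *
 * .. code-block:: bash
 *
 *	cat pp_features
 *	echo 0x0000000000003fff > pp_features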
910  */
911 static ssize_t amdgpu_set_pp_features(struct device *dev,
912 				      struct device_attribute *attr,
913 				      const char *buf,
914 				      size_t count)
915 {
916 	struct drm_device *ddev = dev_get_drvdata(dev);
917 	struct amdgpu_device *adev = drm_to_adev(ddev);
918 	uint64_t featuremask;
919 	int ret;
920 
921 	if (amdgpu_in_reset(adev))
922 		return -EPERM;
923 	if (adev->in_suspend && !adev->in_runpm)
924 		return -EPERM;
925 
926 	ret = kstrtou64(buf, 0, &featuremask);
927 	if (ret)
928 		return -EINVAL;
929 
930 	ret = pm_runtime_get_sync(ddev->dev);
931 	if (ret < 0) {
932 		pm_runtime_put_autosuspend(ddev->dev);
933 		return ret;
934 	}
935 
936 	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
937 
938 	pm_runtime_mark_last_busy(ddev->dev);
939 	pm_runtime_put_autosuspend(ddev->dev);
940 
941 	if (ret)
942 		return -EINVAL;
943 
944 	return count;
945 }
946 
947 static ssize_t amdgpu_get_pp_features(struct device *dev,
948 				      struct device_attribute *attr,
949 				      char *buf)
950 {
951 	struct drm_device *ddev = dev_get_drvdata(dev);
952 	struct amdgpu_device *adev = drm_to_adev(ddev);
953 	ssize_t size;
954 	int ret;
955 
956 	if (amdgpu_in_reset(adev))
957 		return -EPERM;
958 	if (adev->in_suspend && !adev->in_runpm)
959 		return -EPERM;
960 
961 	ret = pm_runtime_get_sync(ddev->dev);
962 	if (ret < 0) {
963 		pm_runtime_put_autosuspend(ddev->dev);
964 		return ret;
965 	}
966 
967 	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
968 	if (size <= 0)
969 		size = sysfs_emit(buf, "\n");
970 
971 	pm_runtime_mark_last_busy(ddev->dev);
972 	pm_runtime_put_autosuspend(ddev->dev);
973 
974 	return size;
975 }
976 
977 /**
978  * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
979  *
980  * The amdgpu driver provides a sysfs API for adjusting what power levels
981  * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
982  * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
983  * this.
984  *
985  * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
986  * Vega10 and later ASICs.
987  * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
988  *
989  * Reading back the files will show you the available power levels within
990  * the power state and the clock information for those levels. If deep sleep is
991  * applied to a clock, the level will be denoted by a special level 'S:'
992  * E.g., ::
993  *
994  *  S: 19Mhz *
995  *  0: 615Mhz
996  *  1: 800Mhz
997  *  2: 888Mhz
998  *  3: 1000Mhz
999  *
1000  *
1001  * To manually adjust these states, first select manual using
1002  * power_dpm_force_performance_level.
1003  * Then enable the desired levels by writing a space-separated list of
1004  * level indices to the file, as in "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
1005  * E.g.,
1006  *
1007  * .. code-block:: bash
1008  *
1009  *	echo "4 5 6" > pp_dpm_sclk
1010  *
1011  * will enable sclk levels 4, 5, and 6.
1012  *
1013  * NOTE: changing the dcefclk max dpm level is not currently supported.
1014  */
1015 
1016 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
1017 		enum pp_clock_type type,
1018 		char *buf)
1019 {
1020 	struct drm_device *ddev = dev_get_drvdata(dev);
1021 	struct amdgpu_device *adev = drm_to_adev(ddev);
1022 	int size = 0;
1023 	int ret = 0;
1024 
1025 	if (amdgpu_in_reset(adev))
1026 		return -EPERM;
1027 	if (adev->in_suspend && !adev->in_runpm)
1028 		return -EPERM;
1029 
1030 	ret = pm_runtime_get_sync(ddev->dev);
1031 	if (ret < 0) {
1032 		pm_runtime_put_autosuspend(ddev->dev);
1033 		return ret;
1034 	}
1035 
1036 	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
1037 	if (ret == -ENOENT)
1038 		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
1039 
1040 	if (size == 0)
1041 		size = sysfs_emit(buf, "\n");
1042 
1043 	pm_runtime_mark_last_busy(ddev->dev);
1044 	pm_runtime_put_autosuspend(ddev->dev);
1045 
1046 	return size;
1047 }
1048 
1049 /*
1050  * Worst case: 32 bits individually specified, in octal at 12 characters
1051  * per line (+1 for \n).
1052  */
1053 #define AMDGPU_MASK_BUF_MAX	(32 * 13)
1054 
1055 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1056 {
1057 	int ret;
1058 	unsigned long level;
1059 	char *sub_str = NULL;
1060 	char *tmp;
1061 	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1062 	const char delimiter[3] = {' ', '\n', '\0'};
1063 	size_t bytes;
1064 
1065 	*mask = 0;
1066 
1067 	bytes = min(count, sizeof(buf_cpy) - 1);
1068 	memcpy(buf_cpy, buf, bytes);
1069 	buf_cpy[bytes] = '\0';
1070 	tmp = buf_cpy;
1071 	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1072 		if (strlen(sub_str)) {
1073 			ret = kstrtoul(sub_str, 0, &level);
1074 			if (ret || level > 31)
1075 				return -EINVAL;
1076 			*mask |= 1 << level;
1077 		} else
1078 			break;
1079 	}
1080 
1081 	return 0;
1082 }
1083 
1084 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1085 		enum pp_clock_type type,
1086 		const char *buf,
1087 		size_t count)
1088 {
1089 	struct drm_device *ddev = dev_get_drvdata(dev);
1090 	struct amdgpu_device *adev = drm_to_adev(ddev);
1091 	int ret;
1092 	uint32_t mask = 0;
1093 
1094 	if (amdgpu_in_reset(adev))
1095 		return -EPERM;
1096 	if (adev->in_suspend && !adev->in_runpm)
1097 		return -EPERM;
1098 
1099 	ret = amdgpu_read_mask(buf, count, &mask);
1100 	if (ret)
1101 		return ret;
1102 
1103 	ret = pm_runtime_get_sync(ddev->dev);
1104 	if (ret < 0) {
1105 		pm_runtime_put_autosuspend(ddev->dev);
1106 		return ret;
1107 	}
1108 
1109 	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1110 
1111 	pm_runtime_mark_last_busy(ddev->dev);
1112 	pm_runtime_put_autosuspend(ddev->dev);
1113 
1114 	if (ret)
1115 		return -EINVAL;
1116 
1117 	return count;
1118 }
1119 
1120 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1121 		struct device_attribute *attr,
1122 		char *buf)
1123 {
1124 	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1125 }
1126 
1127 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1128 		struct device_attribute *attr,
1129 		const char *buf,
1130 		size_t count)
1131 {
1132 	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1133 }
1134 
1135 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1136 		struct device_attribute *attr,
1137 		char *buf)
1138 {
1139 	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1140 }
1141 
1142 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1143 		struct device_attribute *attr,
1144 		const char *buf,
1145 		size_t count)
1146 {
1147 	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1148 }
1149 
1150 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1151 		struct device_attribute *attr,
1152 		char *buf)
1153 {
1154 	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1155 }
1156 
1157 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1158 		struct device_attribute *attr,
1159 		const char *buf,
1160 		size_t count)
1161 {
1162 	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1163 }
1164 
1165 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1166 		struct device_attribute *attr,
1167 		char *buf)
1168 {
1169 	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1170 }
1171 
1172 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1173 		struct device_attribute *attr,
1174 		const char *buf,
1175 		size_t count)
1176 {
1177 	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1178 }
1179 
1180 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1181 		struct device_attribute *attr,
1182 		char *buf)
1183 {
1184 	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1185 }
1186 
1187 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1188 		struct device_attribute *attr,
1189 		const char *buf,
1190 		size_t count)
1191 {
1192 	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1193 }
1194 
1195 static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
1196 		struct device_attribute *attr,
1197 		char *buf)
1198 {
1199 	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
1200 }
1201 
1202 static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
1203 		struct device_attribute *attr,
1204 		const char *buf,
1205 		size_t count)
1206 {
1207 	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
1208 }
1209 
1210 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1211 		struct device_attribute *attr,
1212 		char *buf)
1213 {
1214 	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1215 }
1216 
1217 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1218 		struct device_attribute *attr,
1219 		const char *buf,
1220 		size_t count)
1221 {
1222 	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1223 }
1224 
1225 static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
1226 		struct device_attribute *attr,
1227 		char *buf)
1228 {
1229 	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
1230 }
1231 
1232 static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
1233 		struct device_attribute *attr,
1234 		const char *buf,
1235 		size_t count)
1236 {
1237 	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
1238 }
1239 
1240 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1241 		struct device_attribute *attr,
1242 		char *buf)
1243 {
1244 	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1245 }
1246 
1247 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1248 		struct device_attribute *attr,
1249 		const char *buf,
1250 		size_t count)
1251 {
1252 	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1253 }
1254 
1255 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1256 		struct device_attribute *attr,
1257 		char *buf)
1258 {
1259 	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1260 }
1261 
1262 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1263 		struct device_attribute *attr,
1264 		const char *buf,
1265 		size_t count)
1266 {
1267 	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1268 }
1269 
1270 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1271 		struct device_attribute *attr,
1272 		char *buf)
1273 {
1274 	struct drm_device *ddev = dev_get_drvdata(dev);
1275 	struct amdgpu_device *adev = drm_to_adev(ddev);
1276 	uint32_t value = 0;
1277 	int ret;
1278 
1279 	if (amdgpu_in_reset(adev))
1280 		return -EPERM;
1281 	if (adev->in_suspend && !adev->in_runpm)
1282 		return -EPERM;
1283 
1284 	ret = pm_runtime_get_sync(ddev->dev);
1285 	if (ret < 0) {
1286 		pm_runtime_put_autosuspend(ddev->dev);
1287 		return ret;
1288 	}
1289 
1290 	value = amdgpu_dpm_get_sclk_od(adev);
1291 
1292 	pm_runtime_mark_last_busy(ddev->dev);
1293 	pm_runtime_put_autosuspend(ddev->dev);
1294 
1295 	return sysfs_emit(buf, "%d\n", value);
1296 }
1297 
1298 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1299 		struct device_attribute *attr,
1300 		const char *buf,
1301 		size_t count)
1302 {
1303 	struct drm_device *ddev = dev_get_drvdata(dev);
1304 	struct amdgpu_device *adev = drm_to_adev(ddev);
1305 	int ret;
1306 	long int value;
1307 
1308 	if (amdgpu_in_reset(adev))
1309 		return -EPERM;
1310 	if (adev->in_suspend && !adev->in_runpm)
1311 		return -EPERM;
1312 
1313 	ret = kstrtol(buf, 0, &value);
1314 
1315 	if (ret)
1316 		return -EINVAL;
1317 
1318 	ret = pm_runtime_get_sync(ddev->dev);
1319 	if (ret < 0) {
1320 		pm_runtime_put_autosuspend(ddev->dev);
1321 		return ret;
1322 	}
1323 
1324 	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1325 
1326 	pm_runtime_mark_last_busy(ddev->dev);
1327 	pm_runtime_put_autosuspend(ddev->dev);
1328 
1329 	return count;
1330 }
1331 
1332 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1333 		struct device_attribute *attr,
1334 		char *buf)
1335 {
1336 	struct drm_device *ddev = dev_get_drvdata(dev);
1337 	struct amdgpu_device *adev = drm_to_adev(ddev);
1338 	uint32_t value = 0;
1339 	int ret;
1340 
1341 	if (amdgpu_in_reset(adev))
1342 		return -EPERM;
1343 	if (adev->in_suspend && !adev->in_runpm)
1344 		return -EPERM;
1345 
1346 	ret = pm_runtime_get_sync(ddev->dev);
1347 	if (ret < 0) {
1348 		pm_runtime_put_autosuspend(ddev->dev);
1349 		return ret;
1350 	}
1351 
1352 	value = amdgpu_dpm_get_mclk_od(adev);
1353 
1354 	pm_runtime_mark_last_busy(ddev->dev);
1355 	pm_runtime_put_autosuspend(ddev->dev);
1356 
1357 	return sysfs_emit(buf, "%d\n", value);
1358 }
1359 
1360 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1361 		struct device_attribute *attr,
1362 		const char *buf,
1363 		size_t count)
1364 {
1365 	struct drm_device *ddev = dev_get_drvdata(dev);
1366 	struct amdgpu_device *adev = drm_to_adev(ddev);
1367 	int ret;
1368 	long int value;
1369 
1370 	if (amdgpu_in_reset(adev))
1371 		return -EPERM;
1372 	if (adev->in_suspend && !adev->in_runpm)
1373 		return -EPERM;
1374 
1375 	ret = kstrtol(buf, 0, &value);
1376 
1377 	if (ret)
1378 		return -EINVAL;
1379 
1380 	ret = pm_runtime_get_sync(ddev->dev);
1381 	if (ret < 0) {
1382 		pm_runtime_put_autosuspend(ddev->dev);
1383 		return ret;
1384 	}
1385 
1386 	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1387 
1388 	pm_runtime_mark_last_busy(ddev->dev);
1389 	pm_runtime_put_autosuspend(ddev->dev);
1390 
1391 	return count;
1392 }
1393 
1394 /**
1395  * DOC: pp_power_profile_mode
1396  *
1397  * The amdgpu driver provides a sysfs API for adjusting the heuristics
1398  * related to switching between power levels in a power state.  The file
1399  * pp_power_profile_mode is used for this.
1400  *
1401  * Reading this file outputs a list of all of the predefined power profiles
1402  * and the relevant heuristics settings for that profile.
1403  *
1404  * To select a profile or create a custom profile, first select manual using
1405  * power_dpm_force_performance_level.  Writing the number of a predefined
1406  * profile to pp_power_profile_mode will enable those heuristics.  To
1407  * create a custom set of heuristics, write a string of numbers to the file
1408  * starting with the number of the custom profile along with a setting
1409  * for each heuristic parameter.  Due to differences across asic families,
1410  * the heuristic parameters vary from family to family.
1411  *
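 * For example (illustrative; the profile numbers and the parameters of the
 * CUSTOM profile are ASIC specific, so read the file first):
 *
 * .. code-block:: bash
 *
 *	echo manual > power_dpm_force_performance_level
 *	cat pp_power_profile_mode
 *	echo 1 > pp_power_profile_mode
 *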
1412  */
1413 
1414 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1415 		struct device_attribute *attr,
1416 		char *buf)
1417 {
1418 	struct drm_device *ddev = dev_get_drvdata(dev);
1419 	struct amdgpu_device *adev = drm_to_adev(ddev);
1420 	ssize_t size;
1421 	int ret;
1422 
1423 	if (amdgpu_in_reset(adev))
1424 		return -EPERM;
1425 	if (adev->in_suspend && !adev->in_runpm)
1426 		return -EPERM;
1427 
1428 	ret = pm_runtime_get_sync(ddev->dev);
1429 	if (ret < 0) {
1430 		pm_runtime_put_autosuspend(ddev->dev);
1431 		return ret;
1432 	}
1433 
1434 	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1435 	if (size <= 0)
1436 		size = sysfs_emit(buf, "\n");
1437 
1438 	pm_runtime_mark_last_busy(ddev->dev);
1439 	pm_runtime_put_autosuspend(ddev->dev);
1440 
1441 	return size;
1442 }
1443 
1444 
1445 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1446 		struct device_attribute *attr,
1447 		const char *buf,
1448 		size_t count)
1449 {
1450 	int ret;
1451 	struct drm_device *ddev = dev_get_drvdata(dev);
1452 	struct amdgpu_device *adev = drm_to_adev(ddev);
1453 	uint32_t parameter_size = 0;
1454 	long parameter[64];
1455 	char *sub_str, buf_cpy[128];
1456 	char *tmp_str;
1457 	uint32_t i = 0;
1458 	char tmp[2];
1459 	long int profile_mode = 0;
1460 	const char delimiter[3] = {' ', '\n', '\0'};
1461 
1462 	if (amdgpu_in_reset(adev))
1463 		return -EPERM;
1464 	if (adev->in_suspend && !adev->in_runpm)
1465 		return -EPERM;
1466 
1467 	tmp[0] = *(buf);
1468 	tmp[1] = '\0';
1469 	ret = kstrtol(tmp, 0, &profile_mode);
1470 	if (ret)
1471 		return -EINVAL;
1472 
1473 	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1474 		if (count < 2 || count > 127)
1475 			return -EINVAL;
1476 		while (isspace(*++buf))
1477 			i++;
1478 		memcpy(buf_cpy, buf, count-i);
1479 		tmp_str = buf_cpy;
1480 		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1481 			if (strlen(sub_str) == 0)
1482 				continue;
1483 			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1484 			if (ret)
1485 				return -EINVAL;
1486 			parameter_size++;
1487 			while (isspace(*tmp_str))
1488 				tmp_str++;
1489 		}
1490 	}
1491 	parameter[parameter_size] = profile_mode;
1492 
1493 	ret = pm_runtime_get_sync(ddev->dev);
1494 	if (ret < 0) {
1495 		pm_runtime_put_autosuspend(ddev->dev);
1496 		return ret;
1497 	}
1498 
1499 	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1500 
1501 	pm_runtime_mark_last_busy(ddev->dev);
1502 	pm_runtime_put_autosuspend(ddev->dev);
1503 
1504 	if (!ret)
1505 		return count;
1506 
1507 	return -EINVAL;
1508 }
1509 
1510 static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
1511 					   enum amd_pp_sensors sensor,
1512 					   void *query)
1513 {
1514 	int r, size = sizeof(uint32_t);
1515 
1516 	if (amdgpu_in_reset(adev))
1517 		return -EPERM;
1518 	if (adev->in_suspend && !adev->in_runpm)
1519 		return -EPERM;
1520 
1521 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
1522 	if (r < 0) {
1523 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1524 		return r;
1525 	}
1526 
1527 	/* get the sensor value */
1528 	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1529 
1530 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
1531 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
1532 
1533 	return r;
1534 }
1535 
1536 /**
1537  * DOC: gpu_busy_percent
1538  *
1539  * The amdgpu driver provides a sysfs API for reading how busy the GPU
1540  * is as a percentage.  The file gpu_busy_percent is used for this.
1541  * The SMU firmware computes a percentage of load based on the
1542  * aggregate activity level in the IP cores.
1543  */
1544 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1545 					   struct device_attribute *attr,
1546 					   char *buf)
1547 {
1548 	struct drm_device *ddev = dev_get_drvdata(dev);
1549 	struct amdgpu_device *adev = drm_to_adev(ddev);
1550 	unsigned int value;
1551 	int r;
1552 
1553 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
1554 	if (r)
1555 		return r;
1556 
1557 	return sysfs_emit(buf, "%d\n", value);
1558 }
1559 
1560 /**
1561  * DOC: mem_busy_percent
1562  *
1563  * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1564  * is as a percentage.  The file mem_busy_percent is used for this.
1565  * The SMU firmware computes a percentage of load based on the
1566  * aggregate activity level in the IP cores.
1567  */
1568 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1569 					   struct device_attribute *attr,
1570 					   char *buf)
1571 {
1572 	struct drm_device *ddev = dev_get_drvdata(dev);
1573 	struct amdgpu_device *adev = drm_to_adev(ddev);
1574 	unsigned int value;
1575 	int r;
1576 
1577 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
1578 	if (r)
1579 		return r;
1580 
1581 	return sysfs_emit(buf, "%d\n", value);
1582 }
1583 
1584 /**
1585  * DOC: pcie_bw
1586  *
1587  * The amdgpu driver provides a sysfs API for estimating how much data
1588  * has been received and sent by the GPU in the last second through PCIe.
1589  * The file pcie_bw is used for this.
1590  * The perf counters count the number of received and sent messages and return
1591  * those values, as well as the maximum payload size of a PCIe packet (mps).
1592  * Note that it is not possible to easily and quickly obtain the size of each
1593  * packet transmitted, so we output the max payload size (mps) to allow for
1594  * quick estimation of the PCIe bandwidth usage.
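 *
 * For example, a rough upper-bound estimate of the bandwidth used in the
 * last second, assuming every message carried a full mps-sized payload:
 *
 * .. code-block:: bash
 *
 *	read rx tx mps < pcie_bw
 *	echo "$(( (rx + tx) * mps )) bytes/s (approx.)"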
1595  */
1596 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1597 		struct device_attribute *attr,
1598 		char *buf)
1599 {
1600 	struct drm_device *ddev = dev_get_drvdata(dev);
1601 	struct amdgpu_device *adev = drm_to_adev(ddev);
1602 	uint64_t count0 = 0, count1 = 0;
1603 	int ret;
1604 
1605 	if (amdgpu_in_reset(adev))
1606 		return -EPERM;
1607 	if (adev->in_suspend && !adev->in_runpm)
1608 		return -EPERM;
1609 
1610 	if (adev->flags & AMD_IS_APU)
1611 		return -ENODATA;
1612 
1613 	if (!adev->asic_funcs->get_pcie_usage)
1614 		return -ENODATA;
1615 
1616 	ret = pm_runtime_get_sync(ddev->dev);
1617 	if (ret < 0) {
1618 		pm_runtime_put_autosuspend(ddev->dev);
1619 		return ret;
1620 	}
1621 
1622 	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1623 
1624 	pm_runtime_mark_last_busy(ddev->dev);
1625 	pm_runtime_put_autosuspend(ddev->dev);
1626 
1627 	return sysfs_emit(buf, "%llu %llu %i\n",
1628 			  count0, count1, pcie_get_mps(adev->pdev));
1629 }
1630 
1631 /**
1632  * DOC: unique_id
1633  *
1634  * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1635  * The file unique_id is used for this.
1636  * This will provide a unique ID that will persist from machine to machine.
1637  *
1638  * NOTE: This will only work for GFX9 and newer. This file will be absent
1639  * on unsupported ASICs (GFX8 and older).
1640  */
1641 static ssize_t amdgpu_get_unique_id(struct device *dev,
1642 		struct device_attribute *attr,
1643 		char *buf)
1644 {
1645 	struct drm_device *ddev = dev_get_drvdata(dev);
1646 	struct amdgpu_device *adev = drm_to_adev(ddev);
1647 
1648 	if (amdgpu_in_reset(adev))
1649 		return -EPERM;
1650 	if (adev->in_suspend && !adev->in_runpm)
1651 		return -EPERM;
1652 
1653 	if (adev->unique_id)
1654 		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1655 
1656 	return 0;
1657 }
1658 
1659 /**
1660  * DOC: thermal_throttling_logging
1661  *
1662  * Thermal throttling pulls down the clock frequency and thus the performance.
1663  * It's a useful mechanism to protect the chip from overheating. Since it
1664  * impacts performance, the user controls whether it is enabled and if so,
1665  * the log frequency.
1666  *
1667  * Reading back the file shows you the status (enabled or disabled) and
1668  * the interval (in seconds) between thermal logging events.
1669  *
1670  * Writing an integer to the file sets a new logging interval, in seconds.
1671  * The value should be between 1 and 3600. If the value is less than 1,
1672  * thermal logging is disabled. Values greater than 3600 are ignored.
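 *
 * For example, to log at most once every 60 seconds, or to disable the
 * logging entirely (illustrative):
 *
 * .. code-block:: bash
 *
 *	cat thermal_throttling_logging
 *	echo 60 > thermal_throttling_logging
 *	echo 0 > thermal_throttling_logging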
1673  */
1674 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1675 						     struct device_attribute *attr,
1676 						     char *buf)
1677 {
1678 	struct drm_device *ddev = dev_get_drvdata(dev);
1679 	struct amdgpu_device *adev = drm_to_adev(ddev);
1680 
1681 	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1682 			  adev_to_drm(adev)->unique,
1683 			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1684 			  adev->throttling_logging_rs.interval / HZ + 1);
1685 }
1686 
1687 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1688 						     struct device_attribute *attr,
1689 						     const char *buf,
1690 						     size_t count)
1691 {
1692 	struct drm_device *ddev = dev_get_drvdata(dev);
1693 	struct amdgpu_device *adev = drm_to_adev(ddev);
1694 	long throttling_logging_interval;
1695 	unsigned long flags;
1696 	int ret = 0;
1697 
1698 	ret = kstrtol(buf, 0, &throttling_logging_interval);
1699 	if (ret)
1700 		return ret;
1701 
1702 	if (throttling_logging_interval > 3600)
1703 		return -EINVAL;
1704 
1705 	if (throttling_logging_interval > 0) {
1706 		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1707 		/*
1708 		 * Reset the ratelimit timer internals.
1709 		 * This can effectively restart the timer.
1710 		 */
1711 		adev->throttling_logging_rs.interval =
1712 			(throttling_logging_interval - 1) * HZ;
1713 		adev->throttling_logging_rs.begin = 0;
1714 		adev->throttling_logging_rs.printed = 0;
1715 		adev->throttling_logging_rs.missed = 0;
1716 		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1717 
1718 		atomic_set(&adev->throttling_logging_enabled, 1);
1719 	} else {
1720 		atomic_set(&adev->throttling_logging_enabled, 0);
1721 	}
1722 
1723 	return count;
1724 }
1725 
1726 /**
1727  * DOC: apu_thermal_cap
1728  *
1729  * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
1730  * limit temperature in millidegrees Celsius.
1731  *
1732  * Reading back the file shows you the core limit value.
1733  *
1734  * Writing an integer to the file sets a new thermal limit. The value
1735  * should be between 0 and 100. If the value is less than 0 or greater
1736  * than 100, then the write request will be ignored.
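 *
 * For example (illustrative value):
 *
 * .. code-block:: bash
 *
 *	cat apu_thermal_cap
 *	echo 95 > apu_thermal_cap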
1737  */
1738 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1739 					 struct device_attribute *attr,
1740 					 char *buf)
1741 {
1742 	int ret, size;
1743 	u32 limit;
1744 	struct drm_device *ddev = dev_get_drvdata(dev);
1745 	struct amdgpu_device *adev = drm_to_adev(ddev);
1746 
1747 	ret = pm_runtime_get_sync(ddev->dev);
1748 	if (ret < 0) {
1749 		pm_runtime_put_autosuspend(ddev->dev);
1750 		return ret;
1751 	}
1752 
1753 	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1754 	if (!ret)
1755 		size = sysfs_emit(buf, "%u\n", limit);
1756 	else
1757 		size = sysfs_emit(buf, "failed to get thermal limit\n");
1758 
1759 	pm_runtime_mark_last_busy(ddev->dev);
1760 	pm_runtime_put_autosuspend(ddev->dev);
1761 
1762 	return size;
1763 }
1764 
1765 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1766 					 struct device_attribute *attr,
1767 					 const char *buf,
1768 					 size_t count)
1769 {
1770 	int ret;
1771 	u32 value;
1772 	struct drm_device *ddev = dev_get_drvdata(dev);
1773 	struct amdgpu_device *adev = drm_to_adev(ddev);
1774 
1775 	ret = kstrtou32(buf, 10, &value);
1776 	if (ret)
1777 		return ret;
1778 
1779 	if (value > 100) {
1780 		dev_err(dev, "Invalid argument !\n");
1781 		return -EINVAL;
1782 	}
1783 
1784 	ret = pm_runtime_get_sync(ddev->dev);
1785 	if (ret < 0) {
1786 		pm_runtime_put_autosuspend(ddev->dev);
1787 		return ret;
1788 	}
1789 
1790 	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1791 	if (ret) {
1792 		dev_err(dev, "failed to update thermal limit\n");
		/* drop the runtime PM reference taken above before bailing out */
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
1793 		return ret;
1794 	}
1795 
1796 	pm_runtime_mark_last_busy(ddev->dev);
1797 	pm_runtime_put_autosuspend(ddev->dev);
1798 
1799 	return count;
1800 }
1801 
1802 static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
1803 					 struct amdgpu_device_attr *attr,
1804 					 uint32_t mask,
1805 					 enum amdgpu_device_attr_states *states)
1806 {
1807 	if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
1808 		*states = ATTR_STATE_UNSUPPORTED;
1809 
1810 	return 0;
1811 }
1812 
1813 static ssize_t amdgpu_get_pm_metrics(struct device *dev,
1814 				     struct device_attribute *attr, char *buf)
1815 {
1816 	struct drm_device *ddev = dev_get_drvdata(dev);
1817 	struct amdgpu_device *adev = drm_to_adev(ddev);
1818 	ssize_t size = 0;
1819 	int ret;
1820 
1821 	if (amdgpu_in_reset(adev))
1822 		return -EPERM;
1823 	if (adev->in_suspend && !adev->in_runpm)
1824 		return -EPERM;
1825 
1826 	ret = pm_runtime_get_sync(ddev->dev);
1827 	if (ret < 0) {
1828 		pm_runtime_put_autosuspend(ddev->dev);
1829 		return ret;
1830 	}
1831 
1832 	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
1833 
1834 	pm_runtime_mark_last_busy(ddev->dev);
1835 	pm_runtime_put_autosuspend(ddev->dev);
1836 
1837 	return size;
1838 }
1839 
1840 /**
1841  * DOC: gpu_metrics
1842  *
1843  * The amdgpu driver provides a sysfs API for retrieving current gpu
1844  * metrics data. The file gpu_metrics is used for this. Reading the
1845  * file will dump all the current gpu metrics data.
1846  *
1847  * The data includes temperature, frequency, engine utilization, power
1848  * consumption, throttler status, fan speed and CPU core statistics
1849  * (available for APUs only). That is, it gives a snapshot of all sensors
1850  * at the same time.
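 *
 * The data is returned as a binary, version-dependent structure and is
 * normally parsed by tools; a quick raw dump can be taken with standard
 * utilities, e.g. (illustrative):
 *
 * .. code-block:: bash
 *
 *	hexdump -C gpu_metrics | head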
1851  */
1852 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1853 				      struct device_attribute *attr,
1854 				      char *buf)
1855 {
1856 	struct drm_device *ddev = dev_get_drvdata(dev);
1857 	struct amdgpu_device *adev = drm_to_adev(ddev);
1858 	void *gpu_metrics;
1859 	ssize_t size = 0;
1860 	int ret;
1861 
1862 	if (amdgpu_in_reset(adev))
1863 		return -EPERM;
1864 	if (adev->in_suspend && !adev->in_runpm)
1865 		return -EPERM;
1866 
1867 	ret = pm_runtime_get_sync(ddev->dev);
1868 	if (ret < 0) {
1869 		pm_runtime_put_autosuspend(ddev->dev);
1870 		return ret;
1871 	}
1872 
1873 	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1874 	if (size <= 0)
1875 		goto out;
1876 
1877 	if (size >= PAGE_SIZE)
1878 		size = PAGE_SIZE - 1;
1879 
1880 	memcpy(buf, gpu_metrics, size);
1881 
1882 out:
1883 	pm_runtime_mark_last_busy(ddev->dev);
1884 	pm_runtime_put_autosuspend(ddev->dev);
1885 
1886 	return size;
1887 }
1888 
1889 static int amdgpu_show_powershift_percent(struct device *dev,
1890 					char *buf, enum amd_pp_sensors sensor)
1891 {
1892 	struct drm_device *ddev = dev_get_drvdata(dev);
1893 	struct amdgpu_device *adev = drm_to_adev(ddev);
1894 	uint32_t ss_power;
1895 	int r = 0, i;
1896 
1897 	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1898 	if (r == -EOPNOTSUPP) {
1899 		/* sensor not available on dGPU, try to read from APU */
1900 		adev = NULL;
1901 		mutex_lock(&mgpu_info.mutex);
1902 		for (i = 0; i < mgpu_info.num_gpu; i++) {
1903 			if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1904 				adev = mgpu_info.gpu_ins[i].adev;
1905 				break;
1906 			}
1907 		}
1908 		mutex_unlock(&mgpu_info.mutex);
1909 		if (adev)
1910 			r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
1911 	}
1912 
1913 	if (r)
1914 		return r;
1915 
1916 	return sysfs_emit(buf, "%u%%\n", ss_power);
1917 }
1918 
1919 /**
1920  * DOC: smartshift_apu_power
1921  *
1922  * The amdgpu driver provides a sysfs API for reporting APU power
1923  * shift in percentage if the platform supports smartshift. A value of 0
1924  * means there is no power shift; values in the range [1-100] mean that
1925  * power is shifted to the APU. The percentage of boost is with respect
1926  * to the APU power limit on the platform.
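 *
 * For example (illustrative only, assuming the device is card0), reading
 * the file might return "20%", meaning a boost of roughly 20% of the APU
 * power limit:
 *
 *   cat /sys/class/drm/card0/device/smartshift_apu_power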
1927  */
1928 
1929 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1930 					       char *buf)
1931 {
1932 	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1933 }
1934 
1935 /**
1936  * DOC: smartshift_dgpu_power
1937  *
1938  * The amdgpu driver provides a sysfs API for reporting dGPU power
1939  * shift in percentage if the platform supports smartshift. A value of 0
1940  * means there is no power shift; values in the range [1-100] mean that
1941  * power is shifted to the dGPU. The percentage of boost is with respect
1942  * to the dGPU power limit on the platform.
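 *
 * As with smartshift_apu_power, reading this file returns a percentage,
 * e.g. (illustrative only, assuming the device is card0):
 *
 *   cat /sys/class/drm/card0/device/smartshift_dgpu_power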
1943  */
1944 
1945 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1946 						char *buf)
1947 {
1948 	return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1949 }
1950 
1951 /**
1952  * DOC: smartshift_bias
1953  *
1954  * The amdgpu driver provides a sysfs API for reporting the
1955  * smartshift (SS2.0) bias level. The value ranges from -100 to 100
1956  * and the default is 0. -100 sets maximum preference to the APU
1957  * and 100 sets maximum preference to the dGPU.
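 *
 * A hypothetical example, assuming the device is exposed as card0: to bias
 * power allocation toward the dGPU, write a positive value:
 *
 *   echo 50 > /sys/class/drm/card0/device/smartshift_bias
 *
 * Out-of-range writes are clamped to the supported range rather than
 * rejected.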
1958  */
1959 
1960 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1961 					  struct device_attribute *attr,
1962 					  char *buf)
1963 {
1964 	int r = 0;
1965 
1966 	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1967 
1968 	return r;
1969 }
1970 
1971 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1972 					  struct device_attribute *attr,
1973 					  const char *buf, size_t count)
1974 {
1975 	struct drm_device *ddev = dev_get_drvdata(dev);
1976 	struct amdgpu_device *adev = drm_to_adev(ddev);
1977 	int r = 0;
1978 	int bias = 0;
1979 
1980 	if (amdgpu_in_reset(adev))
1981 		return -EPERM;
1982 	if (adev->in_suspend && !adev->in_runpm)
1983 		return -EPERM;
1984 
1985 	r = pm_runtime_get_sync(ddev->dev);
1986 	if (r < 0) {
1987 		pm_runtime_put_autosuspend(ddev->dev);
1988 		return r;
1989 	}
1990 
1991 	r = kstrtoint(buf, 10, &bias);
1992 	if (r)
1993 		goto out;
1994 
1995 	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1996 		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1997 	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1998 		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1999 
2000 	amdgpu_smartshift_bias = bias;
2001 	r = count;
2002 
2003 	/* TODO: update bias level with SMU message */
2004 
2005 out:
2006 	pm_runtime_mark_last_busy(ddev->dev);
2007 	pm_runtime_put_autosuspend(ddev->dev);
2008 	return r;
2009 }
2010 
2011 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2012 				uint32_t mask, enum amdgpu_device_attr_states *states)
2013 {
2014 	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
2015 		*states = ATTR_STATE_UNSUPPORTED;
2016 
2017 	return 0;
2018 }
2019 
2020 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2021 			       uint32_t mask, enum amdgpu_device_attr_states *states)
2022 {
2023 	uint32_t ss_power;
2024 
2025 	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
2026 		*states = ATTR_STATE_UNSUPPORTED;
2027 	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
2028 		 (void *)&ss_power))
2029 		*states = ATTR_STATE_UNSUPPORTED;
2030 	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
2031 		 (void *)&ss_power))
2032 		*states = ATTR_STATE_UNSUPPORTED;
2033 
2034 	return 0;
2035 }
2036 
2037 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2038 					 uint32_t mask, enum amdgpu_device_attr_states *states)
2039 {
2040 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2041 
2042 	*states = ATTR_STATE_SUPPORTED;
2043 
2044 	if (!amdgpu_dpm_is_overdrive_supported(adev)) {
2045 		*states = ATTR_STATE_UNSUPPORTED;
2046 		return 0;
2047 	}
2048 
2049 	/* Enable pp_od_clk_voltage node for gc 9.4.3 SRIOV/BM support */
2050 	if (gc_ver == IP_VERSION(9, 4, 3)) {
2051 		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
2052 			*states = ATTR_STATE_UNSUPPORTED;
2053 		return 0;
2054 	}
2055 
2056 	if (!(attr->flags & mask))
2057 		*states = ATTR_STATE_UNSUPPORTED;
2058 
2059 	return 0;
2060 }
2061 
2062 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2063 				      uint32_t mask, enum amdgpu_device_attr_states *states)
2064 {
2065 	struct device_attribute *dev_attr = &attr->dev_attr;
2066 	uint32_t gc_ver;
2067 
2068 	*states = ATTR_STATE_SUPPORTED;
2069 
2070 	if (!(attr->flags & mask)) {
2071 		*states = ATTR_STATE_UNSUPPORTED;
2072 		return 0;
2073 	}
2074 
2075 	gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2076 	/* dcefclk node is not available on gfx 11.0.3 sriov */
2077 	if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
2078 	    gc_ver < IP_VERSION(9, 0, 0) ||
2079 	    !amdgpu_device_has_display_hardware(adev))
2080 		*states = ATTR_STATE_UNSUPPORTED;
2081 
2082 	/* SMU MP1 does not support dcefclk level setting;
2083 	 * setting should also not be allowed from a VF if not in one-VF mode.
2084 	 */
2085 	if (gc_ver >= IP_VERSION(10, 0, 0) ||
2086 	    (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))) {
2087 		dev_attr->attr.mode &= ~S_IWUGO;
2088 		dev_attr->store = NULL;
2089 	}
2090 
2091 	return 0;
2092 }
2093 
2094 /* The following values are read out to indicate the current plpd policy:
2095  *  - -1: none
2096  *  - 0: disallow
2097  *  - 1: default
2098  *  - 2: optimized
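 *
 * e.g. a read may return "1: default" (illustrative only).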
2099  */
2100 static ssize_t amdgpu_get_xgmi_plpd_policy(struct device *dev,
2101 					   struct device_attribute *attr,
2102 					   char *buf)
2103 {
2104 	struct drm_device *ddev = dev_get_drvdata(dev);
2105 	struct amdgpu_device *adev = drm_to_adev(ddev);
2106 	char *mode_desc = "none";
2107 	int mode;
2108 
2109 	if (amdgpu_in_reset(adev))
2110 		return -EPERM;
2111 	if (adev->in_suspend && !adev->in_runpm)
2112 		return -EPERM;
2113 
2114 	mode = amdgpu_dpm_get_xgmi_plpd_mode(adev, &mode_desc);
2115 
2116 	return sysfs_emit(buf, "%d: %s\n", mode, mode_desc);
2117 }
2118 
2119 /* The following argument values are accepted from the user to change the plpd policy:
2120  *  - arg 0: disallow plpd
2121  *  - arg 1: default policy
2122  *  - arg 2: optimized policy
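 *
 * e.g. (illustrative only, assuming the device is card0):
 *   echo 2 > /sys/class/drm/card0/device/xgmi_plpd_policy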
2123  */
2124 static ssize_t amdgpu_set_xgmi_plpd_policy(struct device *dev,
2125 					   struct device_attribute *attr,
2126 					   const char *buf, size_t count)
2127 {
2128 	struct drm_device *ddev = dev_get_drvdata(dev);
2129 	struct amdgpu_device *adev = drm_to_adev(ddev);
2130 	int mode, ret;
2131 
2132 	if (amdgpu_in_reset(adev))
2133 		return -EPERM;
2134 	if (adev->in_suspend && !adev->in_runpm)
2135 		return -EPERM;
2136 
2137 	ret = kstrtos32(buf, 0, &mode);
2138 	if (ret)
2139 		return -EINVAL;
2140 
2141 	ret = pm_runtime_get_sync(ddev->dev);
2142 	if (ret < 0) {
2143 		pm_runtime_put_autosuspend(ddev->dev);
2144 		return ret;
2145 	}
2146 
2147 	ret = amdgpu_dpm_set_xgmi_plpd_mode(adev, mode);
2148 
2149 	pm_runtime_mark_last_busy(ddev->dev);
2150 	pm_runtime_put_autosuspend(ddev->dev);
2151 
2152 	if (ret)
2153 		return ret;
2154 
2155 	return count;
2156 }
2157 
2158 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2159 	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2160 	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2161 	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2162 	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2163 	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2164 	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2165 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2166 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2167 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2168 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2169 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2170 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2171 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2172 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2173 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2174 			      .attr_update = pp_dpm_dcefclk_attr_update),
2175 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2176 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
2177 	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
2178 	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2179 	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC,
2180 			      .attr_update = pp_od_clk_voltage_attr_update),
2181 	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2182 	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2183 	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
2184 	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2185 	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2186 	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2187 	AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2188 	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2189 	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,			ATTR_FLAG_BASIC,
2190 			      .attr_update = ss_power_attr_update),
2191 	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,			ATTR_FLAG_BASIC,
2192 			      .attr_update = ss_power_attr_update),
2193 	AMDGPU_DEVICE_ATTR_RW(smartshift_bias,				ATTR_FLAG_BASIC,
2194 			      .attr_update = ss_bias_attr_update),
2195 	AMDGPU_DEVICE_ATTR_RW(xgmi_plpd_policy,				ATTR_FLAG_BASIC),
2196 	AMDGPU_DEVICE_ATTR_RO(pm_metrics,				ATTR_FLAG_BASIC,
2197 			      .attr_update = amdgpu_pm_metrics_attr_update),
2198 };
2199 
2200 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2201 			       uint32_t mask, enum amdgpu_device_attr_states *states)
2202 {
2203 	struct device_attribute *dev_attr = &attr->dev_attr;
2204 	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
2205 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2206 	const char *attr_name = dev_attr->attr.name;
2207 
2208 	if (!(attr->flags & mask)) {
2209 		*states = ATTR_STATE_UNSUPPORTED;
2210 		return 0;
2211 	}
2212 
2213 #define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
2214 
2215 	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2216 		if (gc_ver < IP_VERSION(9, 0, 0))
2217 			*states = ATTR_STATE_UNSUPPORTED;
2218 	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2219 		if (mp1_ver < IP_VERSION(10, 0, 0))
2220 			*states = ATTR_STATE_UNSUPPORTED;
2221 	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2222 		if ((adev->flags & AMD_IS_APU &&
2223 		     gc_ver != IP_VERSION(9, 4, 3)) ||
2224 		    gc_ver == IP_VERSION(9, 0, 1))
2225 			*states = ATTR_STATE_UNSUPPORTED;
2226 	} else if (DEVICE_ATTR_IS(pcie_bw)) {
2227 		/* PCIe Perf counters won't work on APU nodes */
2228 		if (adev->flags & AMD_IS_APU ||
2229 		    !adev->asic_funcs->get_pcie_usage)
2230 			*states = ATTR_STATE_UNSUPPORTED;
2231 	} else if (DEVICE_ATTR_IS(unique_id)) {
2232 		switch (gc_ver) {
2233 		case IP_VERSION(9, 0, 1):
2234 		case IP_VERSION(9, 4, 0):
2235 		case IP_VERSION(9, 4, 1):
2236 		case IP_VERSION(9, 4, 2):
2237 		case IP_VERSION(9, 4, 3):
2238 		case IP_VERSION(10, 3, 0):
2239 		case IP_VERSION(11, 0, 0):
2240 		case IP_VERSION(11, 0, 1):
2241 		case IP_VERSION(11, 0, 2):
2242 		case IP_VERSION(11, 0, 3):
2243 			*states = ATTR_STATE_SUPPORTED;
2244 			break;
2245 		default:
2246 			*states = ATTR_STATE_UNSUPPORTED;
2247 		}
2248 	} else if (DEVICE_ATTR_IS(pp_features)) {
2249 		if ((adev->flags & AMD_IS_APU &&
2250 		     gc_ver != IP_VERSION(9, 4, 3)) ||
2251 		    gc_ver < IP_VERSION(9, 0, 0))
2252 			*states = ATTR_STATE_UNSUPPORTED;
2253 	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
2254 		if (gc_ver < IP_VERSION(9, 1, 0))
2255 			*states = ATTR_STATE_UNSUPPORTED;
2256 	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2257 		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2258 		      gc_ver == IP_VERSION(10, 3, 0) ||
2259 		      gc_ver == IP_VERSION(10, 1, 2) ||
2260 		      gc_ver == IP_VERSION(11, 0, 0) ||
2261 		      gc_ver == IP_VERSION(11, 0, 2) ||
2262 		      gc_ver == IP_VERSION(11, 0, 3) ||
2263 		      gc_ver == IP_VERSION(9, 4, 3)))
2264 			*states = ATTR_STATE_UNSUPPORTED;
2265 	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2266 		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2267 			   gc_ver == IP_VERSION(10, 3, 0) ||
2268 			   gc_ver == IP_VERSION(11, 0, 2) ||
2269 			   gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2270 			*states = ATTR_STATE_UNSUPPORTED;
2271 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2272 		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2273 		      gc_ver == IP_VERSION(10, 3, 0) ||
2274 		      gc_ver == IP_VERSION(10, 1, 2) ||
2275 		      gc_ver == IP_VERSION(11, 0, 0) ||
2276 		      gc_ver == IP_VERSION(11, 0, 2) ||
2277 		      gc_ver == IP_VERSION(11, 0, 3) ||
2278 		      gc_ver == IP_VERSION(9, 4, 3)))
2279 			*states = ATTR_STATE_UNSUPPORTED;
2280 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2281 		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2282 			   gc_ver == IP_VERSION(10, 3, 0) ||
2283 			   gc_ver == IP_VERSION(11, 0, 2) ||
2284 			   gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2285 			*states = ATTR_STATE_UNSUPPORTED;
2286 	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2287 		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2288 			*states = ATTR_STATE_UNSUPPORTED;
2289 		else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2290 			  gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2291 			*states = ATTR_STATE_UNSUPPORTED;
2292 	} else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
2293 		if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
2294 			*states = ATTR_STATE_UNSUPPORTED;
2295 	} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2296 		if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2297 			*states = ATTR_STATE_UNSUPPORTED;
2298 	} else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2299 		if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2300 			*states = ATTR_STATE_UNSUPPORTED;
2301 	} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2302 		u32 limit;
2303 
2304 		if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2305 		    -EOPNOTSUPP)
2306 			*states = ATTR_STATE_UNSUPPORTED;
2307 	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2308 		if (gc_ver == IP_VERSION(9, 4, 2) ||
2309 		    gc_ver == IP_VERSION(9, 4, 3))
2310 			*states = ATTR_STATE_UNSUPPORTED;
2311 	}
2312 
2313 	switch (gc_ver) {
2314 	case IP_VERSION(9, 4, 1):
2315 	case IP_VERSION(9, 4, 2):
2316 		/* MI series cards do not support standalone mclk/socclk/fclk level setting */
2317 		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2318 		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
2319 		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
2320 			dev_attr->attr.mode &= ~S_IWUGO;
2321 			dev_attr->store = NULL;
2322 		}
2323 		break;
2324 	case IP_VERSION(10, 3, 0):
2325 		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2326 		    amdgpu_sriov_vf(adev)) {
2327 			dev_attr->attr.mode &= ~0222;
2328 			dev_attr->store = NULL;
2329 		}
2330 		break;
2331 	default:
2332 		break;
2333 	}
2334 
2335 	/* setting should not be allowed from VF if not in one VF mode */
2336 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
2337 		dev_attr->attr.mode &= ~S_IWUGO;
2338 		dev_attr->store = NULL;
2339 	}
2340 
2341 #undef DEVICE_ATTR_IS
2342 
2343 	return 0;
2344 }
2345 
2346 
2347 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2348 				     struct amdgpu_device_attr *attr,
2349 				     uint32_t mask, struct list_head *attr_list)
2350 {
2351 	int ret = 0;
2352 	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2353 	struct amdgpu_device_attr_entry *attr_entry;
2354 	struct device_attribute *dev_attr;
2355 	const char *name;
2356 
2357 	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2358 			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2359 
2360 	if (!attr)
2361 		return -EINVAL;
2362 
2363 	dev_attr = &attr->dev_attr;
2364 	name = dev_attr->attr.name;
2365 
2366 	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2367 
2368 	ret = attr_update(adev, attr, mask, &attr_states);
2369 	if (ret) {
2370 		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2371 			name, ret);
2372 		return ret;
2373 	}
2374 
2375 	if (attr_states == ATTR_STATE_UNSUPPORTED)
2376 		return 0;
2377 
2378 	ret = device_create_file(adev->dev, dev_attr);
2379 	if (ret) {
2380 		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2381 			name, ret);
2382 	}
2383 
2384 	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2385 	if (!attr_entry)
2386 		return -ENOMEM;
2387 
2388 	attr_entry->attr = attr;
2389 	INIT_LIST_HEAD(&attr_entry->entry);
2390 
2391 	list_add_tail(&attr_entry->entry, attr_list);
2392 
2393 	return ret;
2394 }
2395 
2396 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2397 {
2398 	struct device_attribute *dev_attr = &attr->dev_attr;
2399 
2400 	device_remove_file(adev->dev, dev_attr);
2401 }
2402 
2403 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2404 					     struct list_head *attr_list);
2405 
2406 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2407 					    struct amdgpu_device_attr *attrs,
2408 					    uint32_t counts,
2409 					    uint32_t mask,
2410 					    struct list_head *attr_list)
2411 {
2412 	int ret = 0;
2413 	uint32_t i = 0;
2414 
2415 	for (i = 0; i < counts; i++) {
2416 		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2417 		if (ret)
2418 			goto failed;
2419 	}
2420 
2421 	return 0;
2422 
2423 failed:
2424 	amdgpu_device_attr_remove_groups(adev, attr_list);
2425 
2426 	return ret;
2427 }
2428 
2429 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2430 					     struct list_head *attr_list)
2431 {
2432 	struct amdgpu_device_attr_entry *entry, *entry_tmp;
2433 
2434 	if (list_empty(attr_list))
2435 		return;
2436 
2437 	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2438 		amdgpu_device_attr_remove(adev, entry->attr);
2439 		list_del(&entry->entry);
2440 		kfree(entry);
2441 	}
2442 }
2443 
2444 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2445 				      struct device_attribute *attr,
2446 				      char *buf)
2447 {
2448 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2449 	int channel = to_sensor_dev_attr(attr)->index;
2450 	int r, temp = 0;
2451 
2452 	if (channel >= PP_TEMP_MAX)
2453 		return -EINVAL;
2454 
2455 	switch (channel) {
2456 	case PP_TEMP_JUNCTION:
2457 		/* get current junction temperature */
2458 		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2459 					   (void *)&temp);
2460 		break;
2461 	case PP_TEMP_EDGE:
2462 		/* get current edge temperature */
2463 		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2464 					   (void *)&temp);
2465 		break;
2466 	case PP_TEMP_MEM:
2467 		/* get current memory temperature */
2468 		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2469 					   (void *)&temp);
2470 		break;
2471 	default:
2472 		r = -EINVAL;
2473 		break;
2474 	}
2475 
2476 	if (r)
2477 		return r;
2478 
2479 	return sysfs_emit(buf, "%d\n", temp);
2480 }
2481 
2482 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2483 					     struct device_attribute *attr,
2484 					     char *buf)
2485 {
2486 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2487 	int hyst = to_sensor_dev_attr(attr)->index;
2488 	int temp;
2489 
2490 	if (hyst)
2491 		temp = adev->pm.dpm.thermal.min_temp;
2492 	else
2493 		temp = adev->pm.dpm.thermal.max_temp;
2494 
2495 	return sysfs_emit(buf, "%d\n", temp);
2496 }
2497 
2498 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2499 					     struct device_attribute *attr,
2500 					     char *buf)
2501 {
2502 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2503 	int hyst = to_sensor_dev_attr(attr)->index;
2504 	int temp;
2505 
2506 	if (hyst)
2507 		temp = adev->pm.dpm.thermal.min_hotspot_temp;
2508 	else
2509 		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2510 
2511 	return sysfs_emit(buf, "%d\n", temp);
2512 }
2513 
2514 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2515 					     struct device_attribute *attr,
2516 					     char *buf)
2517 {
2518 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2519 	int hyst = to_sensor_dev_attr(attr)->index;
2520 	int temp;
2521 
2522 	if (hyst)
2523 		temp = adev->pm.dpm.thermal.min_mem_temp;
2524 	else
2525 		temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2526 
2527 	return sysfs_emit(buf, "%d\n", temp);
2528 }
2529 
2530 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2531 					     struct device_attribute *attr,
2532 					     char *buf)
2533 {
2534 	int channel = to_sensor_dev_attr(attr)->index;
2535 
2536 	if (channel >= PP_TEMP_MAX)
2537 		return -EINVAL;
2538 
2539 	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2540 }
2541 
2542 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2543 					     struct device_attribute *attr,
2544 					     char *buf)
2545 {
2546 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2547 	int channel = to_sensor_dev_attr(attr)->index;
2548 	int temp = 0;
2549 
2550 	if (channel >= PP_TEMP_MAX)
2551 		return -EINVAL;
2552 
2553 	switch (channel) {
2554 	case PP_TEMP_JUNCTION:
2555 		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2556 		break;
2557 	case PP_TEMP_EDGE:
2558 		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2559 		break;
2560 	case PP_TEMP_MEM:
2561 		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2562 		break;
2563 	}
2564 
2565 	return sysfs_emit(buf, "%d\n", temp);
2566 }
2567 
2568 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2569 					    struct device_attribute *attr,
2570 					    char *buf)
2571 {
2572 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2573 	u32 pwm_mode = 0;
2574 	int ret;
2575 
2576 	if (amdgpu_in_reset(adev))
2577 		return -EPERM;
2578 	if (adev->in_suspend && !adev->in_runpm)
2579 		return -EPERM;
2580 
2581 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2582 	if (ret < 0) {
2583 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2584 		return ret;
2585 	}
2586 
2587 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2588 
2589 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2590 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2591 
2592 	if (ret)
2593 		return -EINVAL;
2594 
2595 	return sysfs_emit(buf, "%u\n", pwm_mode);
2596 }
2597 
2598 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2599 					    struct device_attribute *attr,
2600 					    const char *buf,
2601 					    size_t count)
2602 {
2603 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2604 	int err, ret;
2605 	u32 pwm_mode;
2606 	int value;
2607 
2608 	if (amdgpu_in_reset(adev))
2609 		return -EPERM;
2610 	if (adev->in_suspend && !adev->in_runpm)
2611 		return -EPERM;
2612 
2613 	err = kstrtoint(buf, 10, &value);
2614 	if (err)
2615 		return err;
2616 
2617 	if (value == 0)
2618 		pwm_mode = AMD_FAN_CTRL_NONE;
2619 	else if (value == 1)
2620 		pwm_mode = AMD_FAN_CTRL_MANUAL;
2621 	else if (value == 2)
2622 		pwm_mode = AMD_FAN_CTRL_AUTO;
2623 	else
2624 		return -EINVAL;
2625 
2626 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2627 	if (ret < 0) {
2628 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2629 		return ret;
2630 	}
2631 
2632 	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2633 
2634 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2635 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2636 
2637 	if (ret)
2638 		return -EINVAL;
2639 
2640 	return count;
2641 }
2642 
2643 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2644 					 struct device_attribute *attr,
2645 					 char *buf)
2646 {
2647 	return sysfs_emit(buf, "%i\n", 0);
2648 }
2649 
2650 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2651 					 struct device_attribute *attr,
2652 					 char *buf)
2653 {
2654 	return sysfs_emit(buf, "%i\n", 255);
2655 }
2656 
2657 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2658 				     struct device_attribute *attr,
2659 				     const char *buf, size_t count)
2660 {
2661 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2662 	int err;
2663 	u32 value;
2664 	u32 pwm_mode;
2665 
2666 	if (amdgpu_in_reset(adev))
2667 		return -EPERM;
2668 	if (adev->in_suspend && !adev->in_runpm)
2669 		return -EPERM;
2670 
2671 	err = kstrtou32(buf, 10, &value);
2672 	if (err)
2673 		return err;
2674 
2675 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2676 	if (err < 0) {
2677 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2678 		return err;
2679 	}
2680 
2681 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2682 	if (err)
2683 		goto out;
2684 
2685 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2686 		pr_info("manual fan speed control should be enabled first\n");
2687 		err = -EINVAL;
2688 		goto out;
2689 	}
2690 
2691 	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2692 
2693 out:
2694 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2695 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2696 
2697 	if (err)
2698 		return err;
2699 
2700 	return count;
2701 }
2702 
2703 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2704 				     struct device_attribute *attr,
2705 				     char *buf)
2706 {
2707 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2708 	int err;
2709 	u32 speed = 0;
2710 
2711 	if (amdgpu_in_reset(adev))
2712 		return -EPERM;
2713 	if (adev->in_suspend && !adev->in_runpm)
2714 		return -EPERM;
2715 
2716 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2717 	if (err < 0) {
2718 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2719 		return err;
2720 	}
2721 
2722 	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2723 
2724 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2725 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2726 
2727 	if (err)
2728 		return err;
2729 
2730 	return sysfs_emit(buf, "%i\n", speed);
2731 }
2732 
2733 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2734 					   struct device_attribute *attr,
2735 					   char *buf)
2736 {
2737 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2738 	int err;
2739 	u32 speed = 0;
2740 
2741 	if (amdgpu_in_reset(adev))
2742 		return -EPERM;
2743 	if (adev->in_suspend && !adev->in_runpm)
2744 		return -EPERM;
2745 
2746 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2747 	if (err < 0) {
2748 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2749 		return err;
2750 	}
2751 
2752 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2753 
2754 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2755 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2756 
2757 	if (err)
2758 		return err;
2759 
2760 	return sysfs_emit(buf, "%i\n", speed);
2761 }
2762 
2763 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2764 					 struct device_attribute *attr,
2765 					 char *buf)
2766 {
2767 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2768 	u32 min_rpm = 0;
2769 	int r;
2770 
2771 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2772 				   (void *)&min_rpm);
2773 
2774 	if (r)
2775 		return r;
2776 
2777 	return sysfs_emit(buf, "%d\n", min_rpm);
2778 }
2779 
2780 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2781 					 struct device_attribute *attr,
2782 					 char *buf)
2783 {
2784 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2785 	u32 max_rpm = 0;
2786 	int r;
2787 
2788 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2789 				   (void *)&max_rpm);
2790 
2791 	if (r)
2792 		return r;
2793 
2794 	return sysfs_emit(buf, "%d\n", max_rpm);
2795 }
2796 
2797 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2798 					   struct device_attribute *attr,
2799 					   char *buf)
2800 {
2801 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2802 	int err;
2803 	u32 rpm = 0;
2804 
2805 	if (amdgpu_in_reset(adev))
2806 		return -EPERM;
2807 	if (adev->in_suspend && !adev->in_runpm)
2808 		return -EPERM;
2809 
2810 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2811 	if (err < 0) {
2812 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2813 		return err;
2814 	}
2815 
2816 	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2817 
2818 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2819 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2820 
2821 	if (err)
2822 		return err;
2823 
2824 	return sysfs_emit(buf, "%i\n", rpm);
2825 }
2826 
2827 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2828 				     struct device_attribute *attr,
2829 				     const char *buf, size_t count)
2830 {
2831 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2832 	int err;
2833 	u32 value;
2834 	u32 pwm_mode;
2835 
2836 	if (amdgpu_in_reset(adev))
2837 		return -EPERM;
2838 	if (adev->in_suspend && !adev->in_runpm)
2839 		return -EPERM;
2840 
2841 	err = kstrtou32(buf, 10, &value);
2842 	if (err)
2843 		return err;
2844 
2845 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2846 	if (err < 0) {
2847 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2848 		return err;
2849 	}
2850 
2851 	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2852 	if (err)
2853 		goto out;
2854 
2855 	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2856 		err = -ENODATA;
2857 		goto out;
2858 	}
2859 
2860 	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2861 
2862 out:
2863 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2864 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2865 
2866 	if (err)
2867 		return err;
2868 
2869 	return count;
2870 }
2871 
2872 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2873 					    struct device_attribute *attr,
2874 					    char *buf)
2875 {
2876 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2877 	u32 pwm_mode = 0;
2878 	int ret;
2879 
2880 	if (amdgpu_in_reset(adev))
2881 		return -EPERM;
2882 	if (adev->in_suspend && !adev->in_runpm)
2883 		return -EPERM;
2884 
2885 	ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2886 	if (ret < 0) {
2887 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2888 		return ret;
2889 	}
2890 
2891 	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2892 
2893 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2894 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2895 
2896 	if (ret)
2897 		return -EINVAL;
2898 
2899 	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2900 }
2901 
2902 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2903 					    struct device_attribute *attr,
2904 					    const char *buf,
2905 					    size_t count)
2906 {
2907 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2908 	int err;
2909 	int value;
2910 	u32 pwm_mode;
2911 
2912 	if (amdgpu_in_reset(adev))
2913 		return -EPERM;
2914 	if (adev->in_suspend && !adev->in_runpm)
2915 		return -EPERM;
2916 
2917 	err = kstrtoint(buf, 10, &value);
2918 	if (err)
2919 		return err;
2920 
2921 	if (value == 0)
2922 		pwm_mode = AMD_FAN_CTRL_AUTO;
2923 	else if (value == 1)
2924 		pwm_mode = AMD_FAN_CTRL_MANUAL;
2925 	else
2926 		return -EINVAL;
2927 
2928 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2929 	if (err < 0) {
2930 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2931 		return err;
2932 	}
2933 
2934 	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2935 
2936 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2937 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2938 
2939 	if (err)
2940 		return -EINVAL;
2941 
2942 	return count;
2943 }
2944 
2945 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2946 					struct device_attribute *attr,
2947 					char *buf)
2948 {
2949 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2950 	u32 vddgfx;
2951 	int r;
2952 
2953 	/* get the voltage */
2954 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
2955 				   (void *)&vddgfx);
2956 	if (r)
2957 		return r;
2958 
2959 	return sysfs_emit(buf, "%d\n", vddgfx);
2960 }
2961 
2962 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2963 					      struct device_attribute *attr,
2964 					      char *buf)
2965 {
2966 	return sysfs_emit(buf, "vddgfx\n");
2967 }
2968 
2969 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2970 				       struct device_attribute *attr,
2971 				       char *buf)
2972 {
2973 	struct amdgpu_device *adev = dev_get_drvdata(dev);
2974 	u32 vddnb;
2975 	int r;
2976 
2977 	/* only APUs have vddnb */
2978 	if  (!(adev->flags & AMD_IS_APU))
2979 		return -EINVAL;
2980 
2981 	/* get the voltage */
2982 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
2983 				   (void *)&vddnb);
2984 	if (r)
2985 		return r;
2986 
2987 	return sysfs_emit(buf, "%d\n", vddnb);
2988 }
2989 
2990 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2991 					      struct device_attribute *attr,
2992 					      char *buf)
2993 {
2994 	return sysfs_emit(buf, "vddnb\n");
2995 }
2996 
2997 static int amdgpu_hwmon_get_power(struct device *dev,
2998 				  enum amd_pp_sensors sensor)
2999 {
3000 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3001 	unsigned int uw;
3002 	u32 query = 0;
3003 	int r;
3004 
3005 	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
3006 	if (r)
3007 		return r;
3008 
3009 	/* convert to microwatts */
3010 	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
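	/* e.g. query = 0x1E80 -> (0x1E * 1000000) + (0x80 * 1000) = 30128000 uW */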
3011 
3012 	return uw;
3013 }
3014 
3015 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3016 					   struct device_attribute *attr,
3017 					   char *buf)
3018 {
3019 	ssize_t val;
3020 
3021 	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3022 	if (val < 0)
3023 		return val;
3024 
3025 	return sysfs_emit(buf, "%zd\n", val);
3026 }
3027 
3028 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3029 					     struct device_attribute *attr,
3030 					     char *buf)
3031 {
3032 	ssize_t val;
3033 
3034 	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3035 	if (val < 0)
3036 		return val;
3037 
3038 	return sysfs_emit(buf, "%zd\n", val);
3039 }
3040 
3041 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3042 					struct device_attribute *attr,
3043 					char *buf,
3044 					enum pp_power_limit_level pp_limit_level)
3045 {
3046 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3047 	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3048 	uint32_t limit;
3049 	ssize_t size;
3050 	int r;
3051 
3052 	if (amdgpu_in_reset(adev))
3053 		return -EPERM;
3054 	if (adev->in_suspend && !adev->in_runpm)
3055 		return -EPERM;
3056 
3057 	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3058 	if (r < 0) {
3059 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3060 		return r;
3061 	}
3062 
3063 	r = amdgpu_dpm_get_power_limit(adev, &limit,
3064 				      pp_limit_level, power_type);
3065 
3066 	if (!r)
3067 		size = sysfs_emit(buf, "%u\n", limit * 1000000);
3068 	else
3069 		size = sysfs_emit(buf, "\n");
3070 
3071 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3072 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3073 
3074 	return size;
3075 }
3076 
3077 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3078 					 struct device_attribute *attr,
3079 					 char *buf)
3080 {
3081 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3082 }
3083 
3084 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3085 					 struct device_attribute *attr,
3086 					 char *buf)
3087 {
3088 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3089 
3090 }
3091 
3092 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3093 					 struct device_attribute *attr,
3094 					 char *buf)
3095 {
3096 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3097 
3098 }
3099 
3100 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3101 					 struct device_attribute *attr,
3102 					 char *buf)
3103 {
3104 	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3105 
3106 }
3107 
3108 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3109 					 struct device_attribute *attr,
3110 					 char *buf)
3111 {
3112 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3113 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3114 
3115 	if (gc_ver == IP_VERSION(10, 3, 1))
3116 		return sysfs_emit(buf, "%s\n",
3117 				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3118 				  "fastPPT" : "slowPPT");
3119 	else
3120 		return sysfs_emit(buf, "PPT\n");
3121 }
3122 
3123 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3124 		struct device_attribute *attr,
3125 		const char *buf,
3126 		size_t count)
3127 {
3128 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3129 	int limit_type = to_sensor_dev_attr(attr)->index;
3130 	int err;
3131 	u32 value;
3132 
3133 	if (amdgpu_in_reset(adev))
3134 		return -EPERM;
3135 	if (adev->in_suspend && !adev->in_runpm)
3136 		return -EPERM;
3137 
3138 	if (amdgpu_sriov_vf(adev))
3139 		return -EINVAL;
3140 
3141 	err = kstrtou32(buf, 10, &value);
3142 	if (err)
3143 		return err;
3144 
3145 	value = value / 1000000; /* convert to Watt */
3146 	value |= limit_type << 24;
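	/* e.g. a 200000000 uW request with limit_type 1 is encoded as (1 << 24) | 200 */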
3147 
3148 	err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3149 	if (err < 0) {
3150 		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3151 		return err;
3152 	}
3153 
3154 	err = amdgpu_dpm_set_power_limit(adev, value);
3155 
3156 	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3157 	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3158 
3159 	if (err)
3160 		return err;
3161 
3162 	return count;
3163 }
3164 
3165 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3166 				      struct device_attribute *attr,
3167 				      char *buf)
3168 {
3169 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3170 	uint32_t sclk;
3171 	int r;
3172 
3173 	/* get the sclk */
3174 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3175 				   (void *)&sclk);
3176 	if (r)
3177 		return r;
3178 
3179 	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3180 }
3181 
3182 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3183 					    struct device_attribute *attr,
3184 					    char *buf)
3185 {
3186 	return sysfs_emit(buf, "sclk\n");
3187 }
3188 
3189 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3190 				      struct device_attribute *attr,
3191 				      char *buf)
3192 {
3193 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3194 	uint32_t mclk;
3195 	int r;
3196 
3197 	/* get the mclk */
3198 	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3199 				   (void *)&mclk);
3200 	if (r)
3201 		return r;
3202 
3203 	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3204 }
3205 
3206 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3207 					    struct device_attribute *attr,
3208 					    char *buf)
3209 {
3210 	return sysfs_emit(buf, "mclk\n");
3211 }
3212 
3213 /**
3214  * DOC: hwmon
3215  *
3216  * The amdgpu driver exposes the following sensor interfaces:
3217  *
3218  * - GPU temperature (via the on-die sensor)
3219  *
3220  * - GPU voltage
3221  *
3222  * - Northbridge voltage (APUs only)
3223  *
3224  * - GPU power
3225  *
3226  * - GPU fan
3227  *
3228  * - GPU gfx/compute engine clock
3229  *
3230  * - GPU memory clock (dGPU only)
3231  *
3232  * hwmon interfaces for GPU temperature:
3233  *
3234  * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3235  *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
3236  *
3237  * - temp[1-3]_label: temperature channel label
3238  *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
3239  *
3240  * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3241  *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3242  *
3243  * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3244  *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3245  *
3246  * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
3247  *   - these are supported on SOC15 dGPUs only
3248  *
3249  * hwmon interfaces for GPU voltage:
3250  *
3251  * - in0_input: the voltage on the GPU in millivolts
3252  *
3253  * - in1_input: the voltage on the Northbridge in millivolts
3254  *
3255  * hwmon interfaces for GPU power:
3256  *
3257  * - power1_average: average power used by the SoC in microWatts.  On APUs this includes the CPU.
3258  *
3259  * - power1_input: instantaneous power used by the SoC in microWatts.  On APUs this includes the CPU.
3260  *
3261  * - power1_cap_min: minimum cap supported in microWatts
3262  *
3263  * - power1_cap_max: maximum cap supported in microWatts
3264  *
3265  * - power1_cap: selected power cap in microWatts
3266  *
3267  * hwmon interfaces for GPU fan:
3268  *
3269  * - pwm1: pulse width modulation fan level (0-255)
3270  *
3271  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3272  *
3273  * - pwm1_min: pulse width modulation fan control minimum level (0)
3274  *
3275  * - pwm1_max: pulse width modulation fan control maximum level (255)
3276  *
3277  * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM)
3278  *
3279  * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM)
3280  *
3281  * - fan1_input: fan speed in RPM
3282  *
3283  * - fan[1-\*]_target: desired fan speed. Unit: revolutions/min (RPM)
3284  *
3285  * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable 0: Disable
3286  *
3287  * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3288  *       If both are written, the former is overridden.
3289  *
3290  * hwmon interfaces for GPU clocks:
3291  *
3292  * - freq1_input: the gfx/compute clock in hertz
3293  *
3294  * - freq2_input: the memory clock in hertz
3295  *
3296  * You can use hwmon tools like sensors to view this information on your system.
3297  *
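 * A hypothetical example (the hwmon index varies per system; hwmon1 is
 * assumed here):
 *
 *   # edge temperature in millidegrees Celsius
 *   cat /sys/class/hwmon/hwmon1/temp1_input
 *   # set the power cap to 200 W (the value is given in microWatts)
 *   echo 200000000 > /sys/class/hwmon/hwmon1/power1_cap
 *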
3298  */
3299 
3300 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3301 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3302 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3303 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3304 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3305 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3306 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3307 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3308 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3309 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3310 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3311 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3312 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3313 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3314 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3315 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3316 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3317 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3318 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3319 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3320 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3321 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3322 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3323 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3324 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3325 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3326 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3327 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3328 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3329 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3330 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3331 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3332 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3333 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3334 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3335 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3336 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3337 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3338 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3339 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3340 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3341 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3342 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3343 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3344 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3345 
3346 static struct attribute *hwmon_attributes[] = {
3347 	&sensor_dev_attr_temp1_input.dev_attr.attr,
3348 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
3349 	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3350 	&sensor_dev_attr_temp2_input.dev_attr.attr,
3351 	&sensor_dev_attr_temp2_crit.dev_attr.attr,
3352 	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3353 	&sensor_dev_attr_temp3_input.dev_attr.attr,
3354 	&sensor_dev_attr_temp3_crit.dev_attr.attr,
3355 	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3356 	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
3357 	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
3358 	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
3359 	&sensor_dev_attr_temp1_label.dev_attr.attr,
3360 	&sensor_dev_attr_temp2_label.dev_attr.attr,
3361 	&sensor_dev_attr_temp3_label.dev_attr.attr,
3362 	&sensor_dev_attr_pwm1.dev_attr.attr,
3363 	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
3364 	&sensor_dev_attr_pwm1_min.dev_attr.attr,
3365 	&sensor_dev_attr_pwm1_max.dev_attr.attr,
3366 	&sensor_dev_attr_fan1_input.dev_attr.attr,
3367 	&sensor_dev_attr_fan1_min.dev_attr.attr,
3368 	&sensor_dev_attr_fan1_max.dev_attr.attr,
3369 	&sensor_dev_attr_fan1_target.dev_attr.attr,
3370 	&sensor_dev_attr_fan1_enable.dev_attr.attr,
3371 	&sensor_dev_attr_in0_input.dev_attr.attr,
3372 	&sensor_dev_attr_in0_label.dev_attr.attr,
3373 	&sensor_dev_attr_in1_input.dev_attr.attr,
3374 	&sensor_dev_attr_in1_label.dev_attr.attr,
3375 	&sensor_dev_attr_power1_average.dev_attr.attr,
3376 	&sensor_dev_attr_power1_input.dev_attr.attr,
3377 	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
3378 	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
3379 	&sensor_dev_attr_power1_cap.dev_attr.attr,
3380 	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
3381 	&sensor_dev_attr_power1_label.dev_attr.attr,
3382 	&sensor_dev_attr_power2_average.dev_attr.attr,
3383 	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
3384 	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
3385 	&sensor_dev_attr_power2_cap.dev_attr.attr,
3386 	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
3387 	&sensor_dev_attr_power2_label.dev_attr.attr,
3388 	&sensor_dev_attr_freq1_input.dev_attr.attr,
3389 	&sensor_dev_attr_freq1_label.dev_attr.attr,
3390 	&sensor_dev_attr_freq2_input.dev_attr.attr,
3391 	&sensor_dev_attr_freq2_label.dev_attr.attr,
3392 	NULL
3393 };
3394 
3395 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3396 					struct attribute *attr, int index)
3397 {
3398 	struct device *dev = kobj_to_dev(kobj);
3399 	struct amdgpu_device *adev = dev_get_drvdata(dev);
3400 	umode_t effective_mode = attr->mode;
3401 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3402 	uint32_t tmp;
3403 
3404 	/* under pp one vf mode manage of hwmon attributes is not supported */
3405 	if (amdgpu_sriov_is_pp_one_vf(adev))
3406 		effective_mode &= ~S_IWUSR;
3407 
3408 	/* Skip fan attributes if fan is not present */
3409 	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3410 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3411 	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3412 	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3413 	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3414 	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3415 	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3416 	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3417 	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3418 		return 0;
3419 
3420 	/* Skip fan attributes on APU */
3421 	if ((adev->flags & AMD_IS_APU) &&
3422 	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3423 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3424 	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3425 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3426 	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3427 	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3428 	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3429 	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3430 	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3431 		return 0;
3432 
3433 	/* Skip crit temp on APU */
3434 	if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3435 	    (gc_ver == IP_VERSION(9, 4, 3))) &&
3436 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3437 	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3438 		return 0;
3439 
3440 	/* Skip limit attributes if DPM is not enabled */
3441 	if (!adev->pm.dpm_enabled &&
3442 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3443 	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3444 	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3445 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3446 	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3447 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3448 	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3449 	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3450 	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3451 	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3452 	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3453 		return 0;
3454 
3455 	/* mask fan attributes if we have no bindings for this asic to expose */
3456 	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3457 	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3458 	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3459 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3460 		effective_mode &= ~S_IRUGO;
3461 
3462 	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3463 	      attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3464 	      ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3465 	      attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3466 		effective_mode &= ~S_IWUSR;
3467 
3468 	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
3469 	if (((adev->family == AMDGPU_FAMILY_SI) ||
3470 	     ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
3471 	      (gc_ver != IP_VERSION(9, 4, 3)))) &&
3472 	    (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3473 	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3474 	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3475 	     attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3476 		return 0;
3477 
3478 	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
3479 	if (((adev->family == AMDGPU_FAMILY_SI) ||
3480 	     ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3481 	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3482 		return 0;
3483 
3484 	/* not all products support both average and instantaneous */
3485 	if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3486 	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
3487 		return 0;
3488 	if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3489 	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
3490 		return 0;
3491 
3492 	/* hide max/min values if we can't both query and manage the fan */
3493 	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3494 	      (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3495 	      (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3496 	      (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3497 	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3498 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3499 		return 0;
3500 
3501 	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3502 	     (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3503 	     (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3504 	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3505 		return 0;
3506 
3507 	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
3508 	     adev->family == AMDGPU_FAMILY_KV ||	/* not implemented yet */
3509 	     (gc_ver == IP_VERSION(9, 4, 3))) &&
3510 	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3511 	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3512 		return 0;
3513 
3514 	/* only APUs other than gc 9.4.3 have vddnb */
3515 	if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
3516 	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3517 	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3518 		return 0;
3519 
3520 	/* no mclk on APUs other than gc 9.4.3 */
3521 	if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3522 	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3523 	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3524 		return 0;
3525 
3526 	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3527 	    (gc_ver != IP_VERSION(9, 4, 3)) &&
3528 	    (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3529 	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3530 	     attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3531 	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3532 	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3533 	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3534 		return 0;
3535 
3536 	/* hotspot temperature for gc 9.4.3 */
3537 	if (gc_ver == IP_VERSION(9, 4, 3)) {
3538 		if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3539 		    attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3540 		    attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3541 			return 0;
3542 
3543 		if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3544 		    attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3545 			return attr->mode;
3546 	}
3547 
3548 	/* only SOC15 dGPUs support hotspot and mem temperatures */
3549 	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3550 	    (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3551 	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3552 	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3553 	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3554 	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3555 		return 0;
3556 
3557 	/* only Vangogh has fast PPT limit and power labels */
3558 	if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3559 	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3560 	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3561 	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3562 	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3563 	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3564 	     attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3565 		return 0;
3566 
3567 	return effective_mode;
3568 }
3569 
3570 static const struct attribute_group hwmon_attrgroup = {
3571 	.attrs = hwmon_attributes,
3572 	.is_visible = hwmon_attributes_visible,
3573 };
3574 
3575 static const struct attribute_group *hwmon_groups[] = {
3576 	&hwmon_attrgroup,
3577 	NULL
3578 };
3579 
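/*
 * Helper for the gpu_od "show" paths: takes a runtime PM reference, prints
 * the requested overdrive table through amdgpu_dpm_print_clock_levels() and
 * drops the reference again.
 */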
3580 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3581 				       enum pp_clock_type od_type,
3582 				       char *buf)
3583 {
3584 	int size = 0;
3585 	int ret;
3586 
3587 	if (amdgpu_in_reset(adev))
3588 		return -EPERM;
3589 	if (adev->in_suspend && !adev->in_runpm)
3590 		return -EPERM;
3591 
3592 	ret = pm_runtime_get_sync(adev->dev);
3593 	if (ret < 0) {
3594 		pm_runtime_put_autosuspend(adev->dev);
3595 		return ret;
3596 	}
3597 
3598 	size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3599 	if (size == 0)
3600 		size = sysfs_emit(buf, "\n");
3601 
3602 	pm_runtime_mark_last_busy(adev->dev);
3603 	pm_runtime_put_autosuspend(adev->dev);
3604 
3605 	return size;
3606 }
3607 
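/*
 * Parse one overdrive command line written through sysfs. The input is
 * either "c" (commit the staged settings), "r" (restore the defaults) or a
 * whitespace separated list of integer parameters for the command type
 * passed in via @type. At most 127 characters are accepted per write.
 */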
3608 static int parse_input_od_command_lines(const char *buf,
3609 					size_t count,
3610 					u32 *type,
3611 					long *params,
3612 					uint32_t *num_of_params)
3613 {
3614 	const char delimiter[3] = {' ', '\n', '\0'};
3615 	uint32_t parameter_size = 0;
3616 	char buf_cpy[128] = {0};
3617 	char *tmp_str, *sub_str;
3618 	int ret;
3619 
3620 	if (count > sizeof(buf_cpy) - 1)
3621 		return -EINVAL;
3622 
3623 	memcpy(buf_cpy, buf, count);
3624 	tmp_str = buf_cpy;
3625 
3626 	/* skip leading spaces */
3627 	while (isspace(*tmp_str))
3628 		tmp_str++;
3629 
3630 	switch (*tmp_str) {
3631 	case 'c':
3632 		*type = PP_OD_COMMIT_DPM_TABLE;
3633 		return 0;
3634 	case 'r':
3635 		params[parameter_size] = *type;
3636 		*num_of_params = 1;
3637 		*type = PP_OD_RESTORE_DEFAULT_TABLE;
3638 		return 0;
3639 	default:
3640 		break;
3641 	}
3642 
3643 	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3644 		if (strlen(sub_str) == 0)
3645 			continue;
3646 
3647 		ret = kstrtol(sub_str, 0, &params[parameter_size]);
3648 		if (ret)
3649 			return -EINVAL;
3650 		parameter_size++;
3651 
3652 		while (isspace(*tmp_str))
3653 			tmp_str++;
3654 	}
3655 
3656 	*num_of_params = parameter_size;
3657 
3658 	return 0;
3659 }
3660 
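/*
 * Helper for the gpu_od "store" paths: parses the user supplied command
 * line, hands it to the DPM code and, on a commit command, triggers a
 * power state readjustment.
 */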
3661 static int
3662 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3663 				     enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3664 				     const char *in_buf,
3665 				     size_t count)
3666 {
3667 	uint32_t parameter_size = 0;
3668 	long parameter[64];
3669 	int ret;
3670 
3671 	if (amdgpu_in_reset(adev))
3672 		return -EPERM;
3673 	if (adev->in_suspend && !adev->in_runpm)
3674 		return -EPERM;
3675 
3676 	ret = parse_input_od_command_lines(in_buf,
3677 					   count,
3678 					   &cmd_type,
3679 					   parameter,
3680 					   &parameter_size);
3681 	if (ret)
3682 		return ret;
3683 
3684 	ret = pm_runtime_get_sync(adev->dev);
3685 	if (ret < 0)
3686 		goto err_out0;
3687 
3688 	ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3689 					    cmd_type,
3690 					    parameter,
3691 					    parameter_size);
3692 	if (ret)
3693 		goto err_out1;
3694 
3695 	if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
3696 		ret = amdgpu_dpm_dispatch_task(adev,
3697 					       AMD_PP_TASK_READJUST_POWER_STATE,
3698 					       NULL);
3699 		if (ret)
3700 			goto err_out1;
3701 	}
3702 
3703 	pm_runtime_mark_last_busy(adev->dev);
3704 	pm_runtime_put_autosuspend(adev->dev);
3705 
3706 	return count;
3707 
3708 err_out1:
3709 	pm_runtime_mark_last_busy(adev->dev);
3710 err_out0:
3711 	pm_runtime_put_autosuspend(adev->dev);
3712 
3713 	return ret;
3714 }
3715 
3716 /**
3717  * DOC: fan_curve
3718  *
3719  * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3720  * control curve line.
3721  *
3722  * Reading back the file shows you the current settings (temperature in degrees
3723  * Celsius and fan speed in PWM) applied to every anchor point of the curve line
3724  * and their permitted ranges if changeable.
3725  *
3726  * Writing a string in the format "anchor_point_index temperature
3727  * fan_speed_in_pwm" to the file changes the settings for the specified anchor
3728  * point accordingly.
3729  *
3730  * When you have finished editing, write "c" (commit) to the file to commit
3731  * your changes.
3732  *
3733  * If you want to reset to the default values, write "r" (reset) to the file to
3734  * reset them.
3735  *
3736  * There are two fan control modes supported: auto and manual. With auto mode,
3737  * PMFW handles the fan speed control (how fan speed reacts to ASIC temperature).
3738  * With manual mode, users can set their own fan curve line as described
3739  * here. Normally the ASIC boots up in auto mode. Any
3740  * settings via this interface will switch the fan control to manual mode
3741  * implicitly.
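 *
 * For example (using arbitrary, illustrative values; the actual permitted
 * ranges are reported when reading the file back):
 *
 * - "echo 2 75 150 > fan_curve" requests 75 Celsius / PWM 150 for anchor
 *   point 2
 *
 * - "echo c > fan_curve" commits the change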
3742  */
3743 static ssize_t fan_curve_show(struct kobject *kobj,
3744 			      struct kobj_attribute *attr,
3745 			      char *buf)
3746 {
3747 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3748 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3749 
3750 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
3751 }
3752 
3753 static ssize_t fan_curve_store(struct kobject *kobj,
3754 			       struct kobj_attribute *attr,
3755 			       const char *buf,
3756 			       size_t count)
3757 {
3758 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3759 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3760 
3761 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3762 							     PP_OD_EDIT_FAN_CURVE,
3763 							     buf,
3764 							     count);
3765 }
3766 
3767 static umode_t fan_curve_visible(struct amdgpu_device *adev)
3768 {
3769 	umode_t umode = 0000;
3770 
3771 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
3772 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
3773 
3774 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
3775 		umode |= S_IWUSR;
3776 
3777 	return umode;
3778 }
3779 
3780 /**
3781  * DOC: acoustic_limit_rpm_threshold
3782  *
3783  * The amdgpu driver provides a sysfs API for checking and adjusting the
3784  * acoustic limit in RPM for fan control.
3785  *
3786  * Reading back the file shows you the current setting and the permitted
3787  * ranges if changeable.
3788  *
3789  * Writing an integer to the file changes the setting accordingly.
3790  *
3791  * When you have finished editing, write "c" (commit) to the file to commit
3792  * your changes.
3793  *
3794  * If you want to reset to the default value, write "r" (reset) to the file to
3795  * reset it.
3796  *
3797  * This setting works under auto fan control mode only. It adjusts the PMFW's
3798  * behavior about the maximum speed in RPM the fan can spin. Setting via this
3799  * interface will switch the fan control to auto mode implicitly.
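 *
 * For example (3000 is an arbitrary, illustrative value):
 *
 * - "echo 3000 > acoustic_limit_rpm_threshold" requests a 3000 RPM limit
 *
 * - "echo c > acoustic_limit_rpm_threshold" commits the change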
3800  */
3801 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
3802 					     struct kobj_attribute *attr,
3803 					     char *buf)
3804 {
3805 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3806 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3807 
3808 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
3809 }
3810 
3811 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
3812 					      struct kobj_attribute *attr,
3813 					      const char *buf,
3814 					      size_t count)
3815 {
3816 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3817 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3818 
3819 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3820 							     PP_OD_EDIT_ACOUSTIC_LIMIT,
3821 							     buf,
3822 							     count);
3823 }
3824 
3825 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
3826 {
3827 	umode_t umode = 0000;
3828 
3829 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
3830 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
3831 
3832 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
3833 		umode |= S_IWUSR;
3834 
3835 	return umode;
3836 }
3837 
3838 /**
3839  * DOC: acoustic_target_rpm_threshold
3840  *
3841  * The amdgpu driver provides a sysfs API for checking and adjusting the
3842  * acoustic target in RPM for fan control.
3843  *
3844  * Reading back the file shows you the current setting and the permitted
3845  * ranges if changeable.
3846  *
3847  * Writing an integer to the file changes the setting accordingly.
3848  *
3849  * When you have finished editing, write "c" (commit) to the file to commit
3850  * your changes.
3851  *
3852  * If you want to reset to the default value, write "r" (reset) to the file to
3853  * reset it.
3854  *
3855  * This setting works under auto fan control mode only. It can co-exist with
3856  * other settings which also work under auto mode. It adjusts the PMFW's
3857  * behavior about the maximum speed in RPM the fan can spin when ASIC
3858  * temperature is not greater than target temperature. Setting via this
3859  * interface will switch the fan control to auto mode implicitly.
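 *
 * For example (2500 is an arbitrary, illustrative value):
 *
 * - "echo 2500 > acoustic_target_rpm_threshold" requests a 2500 RPM target
 *
 * - "echo c > acoustic_target_rpm_threshold" commits the change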
3860  */
3861 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
3862 					      struct kobj_attribute *attr,
3863 					      char *buf)
3864 {
3865 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3866 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3867 
3868 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
3869 }
3870 
3871 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
3872 					       struct kobj_attribute *attr,
3873 					       const char *buf,
3874 					       size_t count)
3875 {
3876 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3877 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3878 
3879 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3880 							     PP_OD_EDIT_ACOUSTIC_TARGET,
3881 							     buf,
3882 							     count);
3883 }
3884 
3885 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
3886 {
3887 	umode_t umode = 0000;
3888 
3889 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
3890 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
3891 
3892 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
3893 		umode |= S_IWUSR;
3894 
3895 	return umode;
3896 }
3897 
3898 /**
3899  * DOC: fan_target_temperature
3900  *
3901  * The amdgpu driver provides a sysfs API for checking and adjusting the
3902  * target temperature in degrees Celsius for fan control.
3903  *
3904  * Reading back the file shows you the current setting and the permitted
3905  * ranges if changeable.
3906  *
3907  * Writing an integer to the file changes the setting accordingly.
3908  *
3909  * When you have finished editing, write "c" (commit) to the file to commit
3910  * your changes.
3911  *
3912  * If you want to reset to the default value, write "r" (reset) to the file to
3913  * reset it.
3914  *
3915  * This setting works under auto fan control mode only. It can co-exist with
3916  * other settings which also work under auto mode. Together with the
3917  * acoustic_target_rpm_threshold setting, they define the maximum speed in
3918  * RPM the fan can spin when ASIC temperature is not greater than target
3919  * temperature. Setting via this interface will switch the fan control to
3920  * auto mode implicitly.
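 *
 * For example (85 is an arbitrary, illustrative value):
 *
 * - "echo 85 > fan_target_temperature" requests a target of 85 degrees Celsius
 *
 * - "echo c > fan_target_temperature" commits the change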
3921  */
3922 static ssize_t fan_target_temperature_show(struct kobject *kobj,
3923 					   struct kobj_attribute *attr,
3924 					   char *buf)
3925 {
3926 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3927 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3928 
3929 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
3930 }
3931 
3932 static ssize_t fan_target_temperature_store(struct kobject *kobj,
3933 					    struct kobj_attribute *attr,
3934 					    const char *buf,
3935 					    size_t count)
3936 {
3937 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3938 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3939 
3940 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
3941 							     PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
3942 							     buf,
3943 							     count);
3944 }
3945 
3946 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
3947 {
3948 	umode_t umode = 0000;
3949 
3950 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
3951 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
3952 
3953 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
3954 		umode |= S_IWUSR;
3955 
3956 	return umode;
3957 }
3958 
3959 /**
3960  * DOC: fan_minimum_pwm
3961  *
3962  * The amdgpu driver provides a sysfs API for checking and adjusting the
3963  * minimum fan speed in PWM.
3964  *
3965  * Reading back the file shows you the current setting and the permitted
3966  * ranges if changeable.
3967  *
3968  * Writing an integer to the file changes the setting accordingly.
3969  *
3970  * When you have finished editing, write "c" (commit) to the file to commit
3971  * your changes.
3972  *
3973  * If you want to reset to the default value, write "r" (reset) to the file to
3974  * reset it.
3975  *
3976  * This setting works under auto fan control mode only. It can co-exist with
3977  * other settings which also work under auto mode. It adjusts the PMFW's
3978  * behavior about the minimum fan speed in PWM the fan should spin. Setting
3979  * via this interface will switch the fan control to auto mode implicitly.
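 *
 * For example (40 is an arbitrary, illustrative value):
 *
 * - "echo 40 > fan_minimum_pwm" requests a minimum fan speed of PWM 40
 *
 * - "echo c > fan_minimum_pwm" commits the change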
3980  */
3981 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
3982 				    struct kobj_attribute *attr,
3983 				    char *buf)
3984 {
3985 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3986 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3987 
3988 	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
3989 }
3990 
3991 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
3992 				     struct kobj_attribute *attr,
3993 				     const char *buf,
3994 				     size_t count)
3995 {
3996 	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
3997 	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
3998 
3999 	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4000 							     PP_OD_EDIT_FAN_MINIMUM_PWM,
4001 							     buf,
4002 							     count);
4003 }
4004 
4005 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4006 {
4007 	umode_t umode = 0000;
4008 
4009 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4010 		umode |= S_IRUSR | S_IRGRP | S_IROTH;
4011 
4012 	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4013 		umode |= S_IWUSR;
4014 
4015 	return umode;
4016 }
4017 
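/*
 * Layout of the gpu_od sysfs hierarchy: each container below becomes either
 * a sub directory (fan_ctrl here) holding its supported sub features as
 * plain files, or a plain file itself if it has no valid sub feature.
 */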
4018 static struct od_feature_set amdgpu_od_set = {
4019 	.containers = {
4020 		[0] = {
4021 			.name = "fan_ctrl",
4022 			.sub_feature = {
4023 				[0] = {
4024 					.name = "fan_curve",
4025 					.ops = {
4026 						.is_visible = fan_curve_visible,
4027 						.show = fan_curve_show,
4028 						.store = fan_curve_store,
4029 					},
4030 				},
4031 				[1] = {
4032 					.name = "acoustic_limit_rpm_threshold",
4033 					.ops = {
4034 						.is_visible = acoustic_limit_threshold_visible,
4035 						.show = acoustic_limit_threshold_show,
4036 						.store = acoustic_limit_threshold_store,
4037 					},
4038 				},
4039 				[2] = {
4040 					.name = "acoustic_target_rpm_threshold",
4041 					.ops = {
4042 						.is_visible = acoustic_target_threshold_visible,
4043 						.show = acoustic_target_threshold_show,
4044 						.store = acoustic_target_threshold_store,
4045 					},
4046 				},
4047 				[3] = {
4048 					.name = "fan_target_temperature",
4049 					.ops = {
4050 						.is_visible = fan_target_temperature_visible,
4051 						.show = fan_target_temperature_show,
4052 						.store = fan_target_temperature_store,
4053 					},
4054 				},
4055 				[4] = {
4056 					.name = "fan_minimum_pwm",
4057 					.ops = {
4058 						.is_visible = fan_minimum_pwm_visible,
4059 						.show = fan_minimum_pwm_show,
4060 						.store = fan_minimum_pwm_store,
4061 					},
4062 				},
4063 			},
4064 		},
4065 	},
4066 };
4067 
4068 static void od_kobj_release(struct kobject *kobj)
4069 {
4070 	struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4071 
4072 	kfree(od_kobj);
4073 }
4074 
4075 static const struct kobj_type od_ktype = {
4076 	.release	= od_kobj_release,
4077 	.sysfs_ops	= &kobj_sysfs_ops,
4078 };
4079 
4080 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4081 {
4082 	struct od_kobj *container, *container_next;
4083 	struct od_attribute *attribute, *attribute_next;
4084 
4085 	if (list_empty(&adev->pm.od_kobj_list))
4086 		return;
4087 
4088 	list_for_each_entry_safe(container, container_next,
4089 				 &adev->pm.od_kobj_list, entry) {
4090 		list_del(&container->entry);
4091 
4092 		list_for_each_entry_safe(attribute, attribute_next,
4093 					 &container->attribute, entry) {
4094 			list_del(&attribute->entry);
4095 			sysfs_remove_file(&container->kobj,
4096 					  &attribute->attribute.attr);
4097 			kfree(attribute);
4098 		}
4099 
4100 		kobject_put(&container->kobj);
4101 	}
4102 }
4103 
4104 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4105 					   struct od_feature_ops *feature_ops)
4106 {
4107 	umode_t mode;
4108 
4109 	if (!feature_ops->is_visible)
4110 		return false;
4111 
4112 	/*
4113 	 * If the feature has neither a user read nor a user write mode set,
4114 	 * we can assume the feature is actually not supported, and the
4115 	 * relevant sysfs interface should not be exposed.
4116 	 */
4117 	mode = feature_ops->is_visible(adev);
4118 	if (mode & (S_IRUSR | S_IWUSR))
4119 		return true;
4120 
4121 	return false;
4122 }
4123 
4124 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4125 					struct od_feature_container *container)
4126 {
4127 	int i;
4128 
4129 	/*
4130 	 * If there is no valid entry within the container, the container
4131 	 * is recognized as a self-contained container. A valid entry here
4132 	 * means one that has a valid name and is visible/supported by
4133 	 * the ASIC.
4134 	 */
4135 	for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4136 		if (container->sub_feature[i].name &&
4137 		    amdgpu_is_od_feature_supported(adev,
4138 			&container->sub_feature[i].ops))
4139 			return false;
4140 	}
4141 
4142 	return true;
4143 }
4144 
4145 static int amdgpu_od_set_init(struct amdgpu_device *adev)
4146 {
4147 	struct od_kobj *top_set, *sub_set;
4148 	struct od_attribute *attribute;
4149 	struct od_feature_container *container;
4150 	struct od_feature_item *feature;
4151 	int i, j;
4152 	int ret;
4153 
4154 	/* Setup the top `gpu_od` directory which holds all other OD interfaces */
4155 	top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4156 	if (!top_set)
4157 		return -ENOMEM;
4158 	list_add(&top_set->entry, &adev->pm.od_kobj_list);
4159 
4160 	ret = kobject_init_and_add(&top_set->kobj,
4161 				   &od_ktype,
4162 				   &adev->dev->kobj,
4163 				   "%s",
4164 				   "gpu_od");
4165 	if (ret)
4166 		goto err_out;
4167 	INIT_LIST_HEAD(&top_set->attribute);
4168 	top_set->priv = adev;
4169 
4170 	for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4171 		container = &amdgpu_od_set.containers[i];
4172 
4173 		if (!container->name)
4174 			continue;
4175 
4176 		/*
4177 		 * If there are valid entries within the container, the container
4178 		 * will be presented as a sub directory and all the entries it holds
4179 		 * will be presented as plain files under it.
4180 		 * If there is no valid entry within the container, the container
4181 		 * itself will be presented as a plain file under the top `gpu_od` directory.
4182 		 */
4183 		if (amdgpu_od_is_self_contained(adev, container)) {
4184 			if (!amdgpu_is_od_feature_supported(adev,
4185 			     &container->ops))
4186 				continue;
4187 
4188 			/*
4189 			 * The container is presented as a plain file under top `gpu_od`
4190 			 * directory.
4191 			 */
4192 			attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4193 			if (!attribute) {
4194 				ret = -ENOMEM;
4195 				goto err_out;
4196 			}
4197 			list_add(&attribute->entry, &top_set->attribute);
4198 
4199 			attribute->attribute.attr.mode =
4200 					container->ops.is_visible(adev);
4201 			attribute->attribute.attr.name = container->name;
4202 			attribute->attribute.show =
4203 					container->ops.show;
4204 			attribute->attribute.store =
4205 					container->ops.store;
4206 			ret = sysfs_create_file(&top_set->kobj,
4207 						&attribute->attribute.attr);
4208 			if (ret)
4209 				goto err_out;
4210 		} else {
4211 			/* The container is presented as a sub directory. */
4212 			sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4213 			if (!sub_set) {
4214 				ret = -ENOMEM;
4215 				goto err_out;
4216 			}
4217 			list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4218 
4219 			ret = kobject_init_and_add(&sub_set->kobj,
4220 						   &od_ktype,
4221 						   &top_set->kobj,
4222 						   "%s",
4223 						   container->name);
4224 			if (ret)
4225 				goto err_out;
4226 			INIT_LIST_HEAD(&sub_set->attribute);
4227 			sub_set->priv = adev;
4228 
4229 			for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4230 				feature = &container->sub_feature[j];
4231 				if (!feature->name)
4232 					continue;
4233 
4234 				if (!amdgpu_is_od_feature_supported(adev,
4235 				     &feature->ops))
4236 					continue;
4237 
4238 				/*
4239 				 * With the container presented as a sub directory, the entry within
4240 				 * it is presented as a plain file under the sub directory.
4241 				 */
4242 				attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4243 				if (!attribute) {
4244 					ret = -ENOMEM;
4245 					goto err_out;
4246 				}
4247 				list_add(&attribute->entry, &sub_set->attribute);
4248 
4249 				attribute->attribute.attr.mode =
4250 						feature->ops.is_visible(adev);
4251 				attribute->attribute.attr.name = feature->name;
4252 				attribute->attribute.show =
4253 						feature->ops.show;
4254 				attribute->attribute.store =
4255 						feature->ops.store;
4256 				ret = sysfs_create_file(&sub_set->kobj,
4257 							&attribute->attribute.attr);
4258 				if (ret)
4259 					goto err_out;
4260 			}
4261 		}
4262 	}
4263 
4264 	return 0;
4265 
4266 err_out:
4267 	amdgpu_od_set_fini(adev);
4268 
4269 	return ret;
4270 }
4271 
4272 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4273 {
4274 	enum amdgpu_sriov_vf_mode mode;
4275 	uint32_t mask = 0;
4276 	int ret;
4277 
4278 	if (adev->pm.sysfs_initialized)
4279 		return 0;
4280 
4281 	INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4282 
4283 	if (adev->pm.dpm_enabled == 0)
4284 		return 0;
4285 
4286 	mode = amdgpu_virt_get_sriov_vf_mode(adev);
4287 
4288 	/* under multi-vf mode, the hwmon attributes are all not supported */
4289 	if (mode != SRIOV_VF_MODE_MULTI_VF) {
4290 		adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4291 									    DRIVER_NAME, adev,
4292 									    hwmon_groups);
4293 		if (IS_ERR(adev->pm.int_hwmon_dev)) {
4294 			ret = PTR_ERR(adev->pm.int_hwmon_dev);
4295 			dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4296 			return ret;
4297 		}
4298 	}
4299 
4300 	switch (mode) {
4301 	case SRIOV_VF_MODE_ONE_VF:
4302 		mask = ATTR_FLAG_ONEVF;
4303 		break;
4304 	case SRIOV_VF_MODE_MULTI_VF:
4305 		mask = 0;
4306 		break;
4307 	case SRIOV_VF_MODE_BARE_METAL:
4308 	default:
4309 		mask = ATTR_FLAG_MASK_ALL;
4310 		break;
4311 	}
4312 
4313 	ret = amdgpu_device_attr_create_groups(adev,
4314 					       amdgpu_device_attrs,
4315 					       ARRAY_SIZE(amdgpu_device_attrs),
4316 					       mask,
4317 					       &adev->pm.pm_attr_list);
4318 	if (ret)
4319 		goto err_out0;
4320 
4321 	if (amdgpu_dpm_is_overdrive_supported(adev)) {
4322 		ret = amdgpu_od_set_init(adev);
4323 		if (ret)
4324 			goto err_out1;
4325 	}
4326 
4327 	adev->pm.sysfs_initialized = true;
4328 
4329 	return 0;
4330 
4331 err_out1:
4332 	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4333 err_out0:
4334 	if (adev->pm.int_hwmon_dev)
4335 		hwmon_device_unregister(adev->pm.int_hwmon_dev);
4336 
4337 	return ret;
4338 }
4339 
4340 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4341 {
4342 	amdgpu_od_set_fini(adev);
4343 
4344 	if (adev->pm.int_hwmon_dev)
4345 		hwmon_device_unregister(adev->pm.int_hwmon_dev);
4346 
4347 	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4348 }
4349 
4350 /*
4351  * Debugfs info
4352  */
4353 #if defined(CONFIG_DEBUG_FS)
4354 
4355 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4356 					   struct amdgpu_device *adev)
4357 {
4358 	uint16_t *p_val;
4359 	uint32_t size;
4360 	int i;
4361 	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4362 
4363 	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4364 		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t), GFP_KERNEL);
4365 		if (!p_val)
4366 			return;
4367 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4368 					    (void *)p_val, &size)) {
4369 			for (i = 0; i < num_cpu_cores; i++)
4370 				seq_printf(m, "\t%u MHz (CPU%d)\n",
4371 					   *(p_val + i), i);
4372 		}
4373 
4374 		kfree(p_val);
4375 	}
4376 }
4377 
4378 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4379 {
4380 	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4381 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4382 	uint32_t value;
4383 	uint64_t value64 = 0;
4384 	uint32_t query = 0;
4385 	int size;
4386 
4387 	/* GPU Clocks */
4388 	size = sizeof(value);
4389 	seq_printf(m, "GFX Clocks and Power:\n");
4390 
4391 	amdgpu_debugfs_prints_cpu_info(m, adev);
4392 
4393 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4394 		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4395 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4396 		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4397 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4398 		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4399 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4400 		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4401 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4402 		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4403 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4404 		seq_printf(m, "\t%u mV (VDDNB)\n", value);
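	/*
	 * The power sensors below report a fixed-point value with 8 fractional
	 * bits, hence the integer/fraction split when printing watts.
	 */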
4405 	size = sizeof(uint32_t);
4406 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4407 		if (adev->flags & AMD_IS_APU)
4408 			seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4409 		else
4410 			seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4411 	}
4412 	size = sizeof(uint32_t);
4413 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4414 		if (adev->flags & AMD_IS_APU)
4415 			seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4416 		else
4417 			seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4418 	}
4419 	size = sizeof(value);
4420 	seq_printf(m, "\n");
4421 
4422 	/* GPU Temp */
4423 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4424 		seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4425 
4426 	/* GPU Load */
4427 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4428 		seq_printf(m, "GPU Load: %u %%\n", value);
4429 	/* MEM Load */
4430 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4431 		seq_printf(m, "MEM Load: %u %%\n", value);
4432 
4433 	seq_printf(m, "\n");
4434 
4435 	/* SMC feature mask */
4436 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4437 		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4438 
4439 	/* ASICs greater than CHIP_VEGA20 support these sensors */
4440 	if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
4441 		/* VCN clocks */
4442 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4443 			if (!value) {
4444 				seq_printf(m, "VCN: Powered down\n");
4445 			} else {
4446 				seq_printf(m, "VCN: Powered up\n");
4447 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4448 					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4449 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4450 					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4451 			}
4452 		}
4453 		seq_printf(m, "\n");
4454 	} else {
4455 		/* UVD clocks */
4456 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4457 			if (!value) {
4458 				seq_printf(m, "UVD: Powered down\n");
4459 			} else {
4460 				seq_printf(m, "UVD: Powered up\n");
4461 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4462 					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4463 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4464 					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4465 			}
4466 		}
4467 		seq_printf(m, "\n");
4468 
4469 		/* VCE clocks */
4470 		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4471 			if (!value) {
4472 				seq_printf(m, "VCE: Powered down\n");
4473 			} else {
4474 				seq_printf(m, "VCE: Powered up\n");
4475 				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4476 					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4477 			}
4478 		}
4479 	}
4480 
4481 	return 0;
4482 }
4483 
4484 static const struct cg_flag_name clocks[] = {
4485 	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
4486 	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
4487 	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
4488 	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
4489 	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
4490 	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
4491 	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
4492 	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
4493 	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
4494 	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
4495 	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
4496 	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
4497 	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
4498 	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
4499 	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
4500 	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
4501 	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
4502 	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
4503 	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
4504 	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
4505 	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
4506 	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
4507 	{AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
4508 	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
4509 	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
4510 	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
4511 	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
4512 	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
4513 	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
4514 	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
4515 	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
4516 	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
4517 	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
4518 	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
4519 	{0, NULL},
4520 };
4521 
4522 static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
4523 {
4524 	int i;
4525 
4526 	for (i = 0; clocks[i].flag; i++)
4527 		seq_printf(m, "\t%s: %s\n", clocks[i].name,
4528 			   (flags & clocks[i].flag) ? "On" : "Off");
4529 }
4530 
4531 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
4532 {
4533 	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
4534 	struct drm_device *dev = adev_to_drm(adev);
4535 	u64 flags = 0;
4536 	int r;
4537 
4538 	if (amdgpu_in_reset(adev))
4539 		return -EPERM;
4540 	if (adev->in_suspend && !adev->in_runpm)
4541 		return -EPERM;
4542 
4543 	r = pm_runtime_get_sync(dev->dev);
4544 	if (r < 0) {
4545 		pm_runtime_put_autosuspend(dev->dev);
4546 		return r;
4547 	}
4548 
4549 	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
4550 		r = amdgpu_debugfs_pm_info_pp(m, adev);
4551 		if (r)
4552 			goto out;
4553 	}
4554 
4555 	amdgpu_device_ip_get_clockgating_state(adev, &flags);
4556 
4557 	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
4558 	amdgpu_parse_cg_state(m, flags);
4559 	seq_printf(m, "\n");
4560 
4561 out:
4562 	pm_runtime_mark_last_busy(dev->dev);
4563 	pm_runtime_put_autosuspend(dev->dev);
4564 
4565 	return r;
4566 }
4567 
4568 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
4569 
4570 /*
4571  * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
4572  *
4573  * Reads debug memory region allocated to PMFW
4574  */
4575 static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
4576 					 size_t size, loff_t *pos)
4577 {
4578 	struct amdgpu_device *adev = file_inode(f)->i_private;
4579 	size_t smu_prv_buf_size;
4580 	void *smu_prv_buf;
4581 	int ret = 0;
4582 
4583 	if (amdgpu_in_reset(adev))
4584 		return -EPERM;
4585 	if (adev->in_suspend && !adev->in_runpm)
4586 		return -EPERM;
4587 
4588 	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
4589 	if (ret)
4590 		return ret;
4591 
4592 	if (!smu_prv_buf || !smu_prv_buf_size)
4593 		return -EINVAL;
4594 
4595 	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
4596 				       smu_prv_buf_size);
4597 }
4598 
4599 static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
4600 	.owner = THIS_MODULE,
4601 	.open = simple_open,
4602 	.read = amdgpu_pm_prv_buffer_read,
4603 	.llseek = default_llseek,
4604 };
4605 
4606 #endif
4607 
4608 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
4609 {
4610 #if defined(CONFIG_DEBUG_FS)
4611 	struct drm_minor *minor = adev_to_drm(adev)->primary;
4612 	struct dentry *root = minor->debugfs_root;
4613 
4614 	if (!adev->pm.dpm_enabled)
4615 		return;
4616 
4617 	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
4618 			    &amdgpu_debugfs_pm_info_fops);
4619 
4620 	if (adev->pm.smu_prv_buffer_size > 0)
4621 		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
4622 					 adev,
4623 					 &amdgpu_debugfs_pm_prv_buffer_fops,
4624 					 adev->pm.smu_prv_buffer_size);
4625 
4626 	amdgpu_dpm_stb_debug_fs_init(adev);
4627 #endif
4628 }
4629