1 /*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Rafał Miłecki <zajec5@gmail.com>
23 * Alex Deucher <alexdeucher@gmail.com>
24 */
25
26 #include "amdgpu.h"
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
30 #include "atom.h"
31 #include <linux/pci.h>
32 #include <linux/hwmon.h>
33 #include <linux/hwmon-sysfs.h>
34 #include <linux/nospec.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/string_choices.h>
37 #include <asm/processor.h>
38
39 #define MAX_NUM_OF_FEATURES_PER_SUBSET 8
40 #define MAX_NUM_OF_SUBSETS 8
41
42 #define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name)
43
44 struct od_attribute {
45 struct kobj_attribute attribute;
46 struct list_head entry;
47 };
48
49 struct od_kobj {
50 struct kobject kobj;
51 struct list_head entry;
52 struct list_head attribute;
53 void *priv;
54 };
55
56 struct od_feature_ops {
57 umode_t (*is_visible)(struct amdgpu_device *adev);
58 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
59 char *buf);
60 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
61 const char *buf, size_t count);
62 };
63
64 struct od_feature_item {
65 const char *name;
66 struct od_feature_ops ops;
67 };
68
69 struct od_feature_container {
70 char *name;
71 struct od_feature_ops ops;
72 struct od_feature_item sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
73 };
74
75 struct od_feature_set {
76 struct od_feature_container containers[MAX_NUM_OF_SUBSETS];
77 };
78
79 static const struct hwmon_temp_label {
80 enum PP_HWMON_TEMP channel;
81 const char *label;
82 } temp_label[] = {
83 {PP_TEMP_EDGE, "edge"},
84 {PP_TEMP_JUNCTION, "junction"},
85 {PP_TEMP_MEM, "mem"},
86 };
87
88 const char * const amdgpu_pp_profile_name[] = {
89 "BOOTUP_DEFAULT",
90 "3D_FULL_SCREEN",
91 "POWER_SAVING",
92 "VIDEO",
93 "VR",
94 "COMPUTE",
95 "CUSTOM",
96 "WINDOW_3D",
97 "CAPPED",
98 "UNCAPPED",
99 };
100
101 /**
102 * amdgpu_pm_dev_state_check - Check if device can be accessed.
103 * @adev: Target device.
104 * @runpm: Check runpm status for suspend state checks.
105 *
106 * Checks the state of the @adev for access. Return 0 if the device is
107 * accessible or a negative error code otherwise.
108 */
109 static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
110 {
111 bool runpm_check = runpm ? adev->in_runpm : false;
112 bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);
113
114 if (amdgpu_in_reset(adev) || !full_init)
115 return -EBUSY;
116
117 if (adev->in_suspend && !runpm_check)
118 return -EBUSY;
119
120 return 0;
121 }
122
123 /**
124 * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
125 * @adev: Target device.
126 *
127 * Checks the state of the @adev for access. Use runtime pm API to resume if
128 * needed. Return 0 if the device is accessible or a negative error code
129 * otherwise.
130 */
131 static int amdgpu_pm_get_access(struct amdgpu_device *adev)
132 {
133 int ret;
134
135 ret = amdgpu_pm_dev_state_check(adev, true);
136 if (ret)
137 return ret;
138
139 return pm_runtime_resume_and_get(adev->dev);
140 }
141
142 /**
143 * amdgpu_pm_get_access_if_active - Check if device is active for access.
144 * @adev: Target device.
145 *
146 * Checks the state of the @adev for access. Use runtime pm API to determine
147 * if the device is active and allow access only in that case. Return 0 if the
148 * device is accessible or a negative error code otherwise.
149 */
150 static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
151 {
152 int ret;
153
154 /* Ignore runpm status. If device is in suspended state, deny access */
155 ret = amdgpu_pm_dev_state_check(adev, false);
156 if (ret)
157 return ret;
158
159 /*
160 * Allow access only if the device is active. If runtime PM is
161 * disabled, as in kernels without CONFIG_PM, allow access as well.
162 */
163 ret = pm_runtime_get_if_active(adev->dev);
164 if (!ret)
165 return -EPERM;
166
167 return 0;
168 }
169
170 /**
171 * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
172 * @adev: Target device.
173 *
174 * Should be paired with amdgpu_pm_get_access* calls
175 */
176 static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
177 {
178 pm_runtime_put_autosuspend(adev->dev);
179 }
180
181 /**
182 * DOC: power_dpm_state
183 *
184 * The power_dpm_state file is a legacy interface and is only provided for
185 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
186 * certain power related parameters. The file power_dpm_state is used for this.
187 * It accepts the following arguments:
188 *
189 * - battery
190 *
191 * - balanced
192 *
193 * - performance
194 *
195 * battery
196 *
197 * On older GPUs, the vbios provided a special power state for battery
198 * operation. Selecting battery switched to this state. This is no
199 * longer provided on newer GPUs so the option does nothing in that case.
200 *
201 * balanced
202 *
203 * On older GPUs, the vbios provided a special power state for balanced
204 * operation. Selecting balanced switched to this state. This is no
205 * longer provided on newer GPUs so the option does nothing in that case.
206 *
207 * performance
208 *
209 * On older GPUs, the vbios provided a special power state for performance
210 * operation. Selecting performance switched to this state. This is no
211 * longer provided on newer GPUs so the option does nothing in that case.
212 *
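 * As an illustration (assuming the GPU is exposed as card0; the card index
 * and sysfs mount point vary between systems), the state can be read and
 * changed from a shell with:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo "battery" > /sys/class/drm/card0/device/power_dpm_state
 *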
213 */
214
215 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
216 struct device_attribute *attr,
217 char *buf)
218 {
219 struct drm_device *ddev = dev_get_drvdata(dev);
220 struct amdgpu_device *adev = drm_to_adev(ddev);
221 enum amd_pm_state_type pm;
222 int ret;
223
224 ret = amdgpu_pm_get_access_if_active(adev);
225 if (ret)
226 return ret;
227
228 amdgpu_dpm_get_current_power_state(adev, &pm);
229
230 amdgpu_pm_put_access(adev);
231
232 return sysfs_emit(buf, "%s\n",
233 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
234 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
235 }
236
237 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
238 struct device_attribute *attr,
239 const char *buf,
240 size_t count)
241 {
242 struct drm_device *ddev = dev_get_drvdata(dev);
243 struct amdgpu_device *adev = drm_to_adev(ddev);
244 enum amd_pm_state_type state;
245 int ret;
246
247 if (sysfs_streq(buf, "battery"))
248 state = POWER_STATE_TYPE_BATTERY;
249 else if (sysfs_streq(buf, "balanced"))
250 state = POWER_STATE_TYPE_BALANCED;
251 else if (sysfs_streq(buf, "performance"))
252 state = POWER_STATE_TYPE_PERFORMANCE;
253 else
254 return -EINVAL;
255
256 ret = amdgpu_pm_get_access(adev);
257 if (ret < 0)
258 return ret;
259
260 amdgpu_dpm_set_power_state(adev, state);
261
262 amdgpu_pm_put_access(adev);
263
264 return count;
265 }
266
267
268 /**
269 * DOC: power_dpm_force_performance_level
270 *
271 * The amdgpu driver provides a sysfs API for adjusting certain power
272 * related parameters. The file power_dpm_force_performance_level is
273 * used for this. It accepts the following arguments:
274 *
275 * - auto
276 *
277 * - low
278 *
279 * - high
280 *
281 * - manual
282 *
283 * - profile_standard
284 *
285 * - profile_min_sclk
286 *
287 * - profile_min_mclk
288 *
289 * - profile_peak
290 *
291 * auto
292 *
293 * When auto is selected, the driver will attempt to dynamically select
294 * the optimal power profile for current conditions in the driver.
295 *
296 * low
297 *
298 * When low is selected, the clocks are forced to the lowest power state.
299 *
300 * high
301 *
302 * When high is selected, the clocks are forced to the highest power state.
303 *
304 * manual
305 *
306 * When manual is selected, the user can manually adjust which power states
307 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
308 * and pp_dpm_pcie files and adjust the power state transition heuristics
309 * via the pp_power_profile_mode sysfs file.
310 *
311 * profile_standard
312 * profile_min_sclk
313 * profile_min_mclk
314 * profile_peak
315 *
316 * When the profiling modes are selected, clock and power gating are
317 * disabled and the clocks are set for different profiling cases. This
318 * mode is recommended for profiling specific work loads where you do
319 * not want clock or power gating or clock fluctuation to interfere
320 * with your results. profile_standard sets the clocks to a fixed clock
321 * level which varies from asic to asic. profile_min_sclk forces the sclk
322 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
323 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
324 *
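 * A minimal usage sketch (assuming card0; the index varies per system) for
 * entering manual mode and reading the current level back:
 *
 * .. code-block:: bash
 *
 *	echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 *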
325 */
326
327 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
328 struct device_attribute *attr,
329 char *buf)
330 {
331 struct drm_device *ddev = dev_get_drvdata(dev);
332 struct amdgpu_device *adev = drm_to_adev(ddev);
333 enum amd_dpm_forced_level level = 0xff;
334 int ret;
335
336 ret = amdgpu_pm_get_access_if_active(adev);
337 if (ret)
338 return ret;
339
340 level = amdgpu_dpm_get_performance_level(adev);
341
342 amdgpu_pm_put_access(adev);
343
344 return sysfs_emit(buf, "%s\n",
345 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
346 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
347 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
348 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
349 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
350 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
351 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
352 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
353 (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
354 "unknown");
355 }
356
357 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
358 struct device_attribute *attr,
359 const char *buf,
360 size_t count)
361 {
362 struct drm_device *ddev = dev_get_drvdata(dev);
363 struct amdgpu_device *adev = drm_to_adev(ddev);
364 enum amd_dpm_forced_level level;
365 int ret = 0;
366
367 if (sysfs_streq(buf, "low"))
368 level = AMD_DPM_FORCED_LEVEL_LOW;
369 else if (sysfs_streq(buf, "high"))
370 level = AMD_DPM_FORCED_LEVEL_HIGH;
371 else if (sysfs_streq(buf, "auto"))
372 level = AMD_DPM_FORCED_LEVEL_AUTO;
373 else if (sysfs_streq(buf, "manual"))
374 level = AMD_DPM_FORCED_LEVEL_MANUAL;
375 else if (sysfs_streq(buf, "profile_exit"))
376 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
377 else if (sysfs_streq(buf, "profile_standard"))
378 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
379 else if (sysfs_streq(buf, "profile_min_sclk"))
380 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
381 else if (sysfs_streq(buf, "profile_min_mclk"))
382 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
383 else if (sysfs_streq(buf, "profile_peak"))
384 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
385 else if (sysfs_streq(buf, "perf_determinism"))
386 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
387 else
388 return -EINVAL;
389
390 ret = amdgpu_pm_get_access(adev);
391 if (ret < 0)
392 return ret;
393
394 mutex_lock(&adev->pm.stable_pstate_ctx_lock);
395 if (amdgpu_dpm_force_performance_level(adev, level)) {
396 amdgpu_pm_put_access(adev);
397 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
398 return -EINVAL;
399 }
400 /* override whatever a user ctx may have set */
401 adev->pm.stable_pstate_ctx = NULL;
402 mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
403
404 amdgpu_pm_put_access(adev);
405
406 return count;
407 }
408
409 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
410 struct device_attribute *attr,
411 char *buf)
412 {
413 struct drm_device *ddev = dev_get_drvdata(dev);
414 struct amdgpu_device *adev = drm_to_adev(ddev);
415 struct pp_states_info data;
416 uint32_t i;
417 int buf_len, ret;
418
419 ret = amdgpu_pm_get_access_if_active(adev);
420 if (ret)
421 return ret;
422
423 if (amdgpu_dpm_get_pp_num_states(adev, &data))
424 memset(&data, 0, sizeof(data));
425
426 amdgpu_pm_put_access(adev);
427
428 buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
429 for (i = 0; i < data.nums; i++)
430 buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
431 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
432 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
433 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
434 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
435
436 return buf_len;
437 }
438
439 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
440 struct device_attribute *attr,
441 char *buf)
442 {
443 struct drm_device *ddev = dev_get_drvdata(dev);
444 struct amdgpu_device *adev = drm_to_adev(ddev);
445 struct pp_states_info data = {0};
446 enum amd_pm_state_type pm = 0;
447 int i = 0, ret = 0;
448
449 ret = amdgpu_pm_get_access_if_active(adev);
450 if (ret)
451 return ret;
452
453 amdgpu_dpm_get_current_power_state(adev, &pm);
454
455 ret = amdgpu_dpm_get_pp_num_states(adev, &data);
456
457 amdgpu_pm_put_access(adev);
458
459 if (ret)
460 return ret;
461
462 for (i = 0; i < data.nums; i++) {
463 if (pm == data.states[i])
464 break;
465 }
466
467 if (i == data.nums)
468 i = -EINVAL;
469
470 return sysfs_emit(buf, "%d\n", i);
471 }
472
473 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
474 struct device_attribute *attr,
475 char *buf)
476 {
477 struct drm_device *ddev = dev_get_drvdata(dev);
478 struct amdgpu_device *adev = drm_to_adev(ddev);
479
480 if (adev->pm.pp_force_state_enabled)
481 return amdgpu_get_pp_cur_state(dev, attr, buf);
482 else
483 return sysfs_emit(buf, "\n");
484 }
485
486 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
487 struct device_attribute *attr,
488 const char *buf,
489 size_t count)
490 {
491 struct drm_device *ddev = dev_get_drvdata(dev);
492 struct amdgpu_device *adev = drm_to_adev(ddev);
493 enum amd_pm_state_type state = 0;
494 struct pp_states_info data;
495 unsigned long idx;
496 int ret;
497
498 adev->pm.pp_force_state_enabled = false;
499
500 if (strlen(buf) == 1)
501 return count;
502
503 ret = kstrtoul(buf, 0, &idx);
504 if (ret || idx >= ARRAY_SIZE(data.states))
505 return -EINVAL;
506
507 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
508
509 ret = amdgpu_pm_get_access(adev);
510 if (ret < 0)
511 return ret;
512
513 ret = amdgpu_dpm_get_pp_num_states(adev, &data);
514 if (ret)
515 goto err_out;
516
517 state = data.states[idx];
518
519 /* only set user selected power states */
520 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
521 state != POWER_STATE_TYPE_DEFAULT) {
522 ret = amdgpu_dpm_dispatch_task(adev,
523 AMD_PP_TASK_ENABLE_USER_STATE, &state);
524 if (ret)
525 goto err_out;
526
527 adev->pm.pp_force_state_enabled = true;
528 }
529
530 amdgpu_pm_put_access(adev);
531
532 return count;
533
534 err_out:
535 amdgpu_pm_put_access(adev);
536
537 return ret;
538 }
539
540 /**
541 * DOC: pp_table
542 *
543 * The amdgpu driver provides a sysfs API for uploading new powerplay
544 * tables. The file pp_table is used for this. Reading the file
545 * will dump the current power play table. Writing to the file
546 * will attempt to upload a new powerplay table and re-initialize
547 * powerplay using that new table.
548 *
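 * As an illustration (paths are examples only; the card index varies per
 * system), the current table can be saved and a modified copy uploaded with:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table
 *	cat /tmp/pp_table.modified > /sys/class/drm/card0/device/pp_table
 *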
549 */
550
551 static ssize_t amdgpu_get_pp_table(struct device *dev,
552 struct device_attribute *attr,
553 char *buf)
554 {
555 struct drm_device *ddev = dev_get_drvdata(dev);
556 struct amdgpu_device *adev = drm_to_adev(ddev);
557 char *table = NULL;
558 int size, ret;
559
560 ret = amdgpu_pm_get_access_if_active(adev);
561 if (ret)
562 return ret;
563
564 size = amdgpu_dpm_get_pp_table(adev, &table);
565
566 amdgpu_pm_put_access(adev);
567
568 if (size <= 0)
569 return size;
570
571 if (size >= PAGE_SIZE)
572 size = PAGE_SIZE - 1;
573
574 memcpy(buf, table, size);
575
576 return size;
577 }
578
579 static ssize_t amdgpu_set_pp_table(struct device *dev,
580 struct device_attribute *attr,
581 const char *buf,
582 size_t count)
583 {
584 struct drm_device *ddev = dev_get_drvdata(dev);
585 struct amdgpu_device *adev = drm_to_adev(ddev);
586 int ret = 0;
587
588 ret = amdgpu_pm_get_access(adev);
589 if (ret < 0)
590 return ret;
591
592 ret = amdgpu_dpm_set_pp_table(adev, buf, count);
593
594 amdgpu_pm_put_access(adev);
595
596 if (ret)
597 return ret;
598
599 return count;
600 }
601
602 /**
603 * DOC: pp_od_clk_voltage
604 *
605 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
606 * in each power level within a power state. The pp_od_clk_voltage is used for
607 * this.
608 *
609 * Note that the actual memory controller clock rate is exposed, not
610 * the effective memory clock of the DRAMs. To translate it, use the
611 * following formula:
612 *
613 * Clock conversion (Mhz):
614 *
615 * HBM: effective_memory_clock = memory_controller_clock * 1
616 *
617 * G5: effective_memory_clock = memory_controller_clock * 1
618 *
619 * G6: effective_memory_clock = memory_controller_clock * 2
620 *
621 * DRAM data rate (MT/s):
622 *
623 * HBM: effective_memory_clock * 2 = data_rate
624 *
625 * G5: effective_memory_clock * 4 = data_rate
626 *
627 * G6: effective_memory_clock * 8 = data_rate
628 *
629 * Bandwidth (MB/s):
630 *
631 * data_rate * vram_bit_width / 8 = memory_bandwidth
632 *
633 * Some examples:
634 *
635 * G5 on RX460:
636 *
637 * memory_controller_clock = 1750 Mhz
638 *
639 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz
640 *
641 * data rate = 1750 * 4 = 7000 MT/s
642 *
643 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
644 *
645 * G6 on RX5700:
646 *
647 * memory_controller_clock = 875 Mhz
648 *
649 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz
650 *
651 * data rate = 1750 * 8 = 14000 MT/s
652 *
653 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
654 *
655 * < For Vega10 and previous ASICs >
656 *
657 * Reading the file will display:
658 *
659 * - a list of engine clock levels and voltages labeled OD_SCLK
660 *
661 * - a list of memory clock levels and voltages labeled OD_MCLK
662 *
663 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
664 *
665 * To manually adjust these settings, first select manual using
666 * power_dpm_force_performance_level. Enter a new value for each
667 * level by writing a string that contains "s/m level clock voltage" to
668 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
669 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
670 * 810 mV. When you have edited all of the states as needed, write
671 * "c" (commit) to the file to commit your changes. If you want to reset to the
672 * default power levels, write "r" (reset) to the file to reset them.
673 *
674 *
675 * < For Vega20 and newer ASICs >
676 *
677 * Reading the file will display:
678 *
679 * - minimum and maximum engine clock labeled OD_SCLK
680 *
681 * - minimum(not available for Vega20 and Navi1x) and maximum memory
682 * clock labeled OD_MCLK
683 *
684 * - minimum and maximum fabric clock labeled OD_FCLK (SMU13)
685 *
686 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
687 * They can be used to calibrate the sclk voltage curve. This is
688 * available for Vega20 and NV1X.
689 *
690 * - voltage offset(in mV) applied on target voltage calculation.
691 * This is available for Sienna Cichlid, Navy Flounder, Dimgrey
692 * Cavefish and some later SMU13 ASICs. For these ASICs, the target
693 * voltage calculation can be illustrated by "voltage = voltage
694 * calculated from v/f curve + overdrive vddgfx offset"
695 *
696 * - a list of valid ranges for sclk, mclk, voltage curve points
697 * or voltage offset labeled OD_RANGE
698 *
699 * < For APUs >
700 *
701 * Reading the file will display:
702 *
703 * - minimum and maximum engine clock labeled OD_SCLK
704 *
705 * - a list of valid ranges for sclk labeled OD_RANGE
706 *
707 * < For VanGogh >
708 *
709 * Reading the file will display:
710 *
711 * - minimum and maximum engine clock labeled OD_SCLK
712 * - minimum and maximum core clocks labeled OD_CCLK
713 *
714 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
715 *
716 * To manually adjust these settings:
717 *
718 * - First select manual using power_dpm_force_performance_level
719 *
720 * - For clock frequency setting, enter a new value by writing a
721 * string that contains "s/m/f index clock" to the file. The index
722 * should be 0 to set the minimum clock and 1 to set the maximum
723 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
724 * "m 1 800" will update maximum mclk to be 800Mhz. "f 1 1600" will
725 * update maximum fabric clock to be 1600Mhz. For core
726 * clocks on VanGogh, the string contains "p core index clock".
727 * E.g., "p 2 0 800" would set the minimum core clock on core
728 * 2 to 800Mhz.
729 *
730 * For sclk voltage curve supported by Vega20 and NV1X, enter the new
731 * values by writing a string that contains "vc point clock voltage"
732 * to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
733 * 600" will update point1 with clock set as 300Mhz and voltage as 600mV.
734 * "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and
735 * voltage 1000mV.
736 *
737 * For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
738 * Cavefish and some later SMU13 ASICs, enter the new value by writing a
739 * string that contains "vo offset". E.g., "vo -10" will update the extra
740 * voltage offset applied to the whole v/f curve line as -10mv.
741 *
742 * - When you have edited all of the states as needed, write "c" (commit)
743 * to the file to commit your changes
744 *
745 * - If you want to reset to the default power levels, write "r" (reset)
746 * to the file to reset them
747 *
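 * An illustrative Vega20-style session (clock and voltage values are
 * placeholders, not recommendations; the card index varies per system):
 *
 * .. code-block:: bash
 *
 *	echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 1 2000" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "m 1 900" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *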
748 */
749
750 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
751 struct device_attribute *attr,
752 const char *buf,
753 size_t count)
754 {
755 struct drm_device *ddev = dev_get_drvdata(dev);
756 struct amdgpu_device *adev = drm_to_adev(ddev);
757 int ret;
758 uint32_t parameter_size = 0;
759 long parameter[64];
760 char buf_cpy[128];
761 char *tmp_str;
762 char *sub_str;
763 const char delimiter[3] = {' ', '\n', '\0'};
764 uint32_t type;
765
766 if (count > 127 || count == 0)
767 return -EINVAL;
768
769 if (*buf == 's')
770 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
771 else if (*buf == 'p')
772 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
773 else if (*buf == 'm')
774 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
775 else if (*buf == 'f')
776 type = PP_OD_EDIT_FCLK_TABLE;
777 else if (*buf == 'r')
778 type = PP_OD_RESTORE_DEFAULT_TABLE;
779 else if (*buf == 'c')
780 type = PP_OD_COMMIT_DPM_TABLE;
781 else if (!strncmp(buf, "vc", 2))
782 type = PP_OD_EDIT_VDDC_CURVE;
783 else if (!strncmp(buf, "vo", 2))
784 type = PP_OD_EDIT_VDDGFX_OFFSET;
785 else
786 return -EINVAL;
787
788 memcpy(buf_cpy, buf, count);
789 buf_cpy[count] = 0;
790
791 tmp_str = buf_cpy;
792
793 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
794 (type == PP_OD_EDIT_VDDGFX_OFFSET))
795 tmp_str++;
796 while (isspace(*++tmp_str));
797
798 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
799 if (strlen(sub_str) == 0)
800 continue;
801 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
802 if (ret)
803 return -EINVAL;
804 parameter_size++;
805
806 if (!tmp_str)
807 break;
808
809 while (isspace(*tmp_str))
810 tmp_str++;
811 }
812
813 ret = amdgpu_pm_get_access(adev);
814 if (ret < 0)
815 return ret;
816
817 if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
818 type,
819 parameter,
820 parameter_size))
821 goto err_out;
822
823 if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
824 parameter, parameter_size))
825 goto err_out;
826
827 if (type == PP_OD_COMMIT_DPM_TABLE) {
828 if (amdgpu_dpm_dispatch_task(adev,
829 AMD_PP_TASK_READJUST_POWER_STATE,
830 NULL))
831 goto err_out;
832 }
833
834 amdgpu_pm_put_access(adev);
835
836 return count;
837
838 err_out:
839 amdgpu_pm_put_access(adev);
840
841 return -EINVAL;
842 }
843
844 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
845 struct device_attribute *attr,
846 char *buf)
847 {
848 struct drm_device *ddev = dev_get_drvdata(dev);
849 struct amdgpu_device *adev = drm_to_adev(ddev);
850 int size = 0;
851 int ret;
852 enum pp_clock_type od_clocks[] = {
853 OD_SCLK,
854 OD_MCLK,
855 OD_FCLK,
856 OD_VDDC_CURVE,
857 OD_RANGE,
858 OD_VDDGFX_OFFSET,
859 OD_CCLK,
860 };
861 uint clk_index;
862
863 ret = amdgpu_pm_get_access_if_active(adev);
864 if (ret)
865 return ret;
866
867 for (clk_index = 0 ; clk_index < ARRAY_SIZE(od_clocks) ; clk_index++) {
868 amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
869 }
870
871 if (size == 0)
872 size = sysfs_emit(buf, "\n");
873
874 amdgpu_pm_put_access(adev);
875
876 return size;
877 }
878
879 /**
880 * DOC: pp_features
881 *
882 * The amdgpu driver provides a sysfs API for adjusting which powerplay
883 * features are enabled. The file pp_features is used for this. This
884 * interface is only available for Vega10 and later dGPUs.
885 *
886 * Reading back the file will show you the following:
887 * - Current ppfeature masks
888 * - List of all the supported powerplay features with their naming,
889 * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
890 *
891 * To manually enable or disable a specific feature, just set or clear
892 * the corresponding bit from original ppfeature masks and input the
893 * new ppfeature masks.
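 *
 * As a sketch (the mask value below is purely illustrative), read the
 * current mask, set or clear the desired feature bit, and write the result
 * back:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fffffffffff > /sys/class/drm/card0/device/pp_features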
894 */
895 static ssize_t amdgpu_set_pp_features(struct device *dev,
896 struct device_attribute *attr,
897 const char *buf,
898 size_t count)
899 {
900 struct drm_device *ddev = dev_get_drvdata(dev);
901 struct amdgpu_device *adev = drm_to_adev(ddev);
902 uint64_t featuremask;
903 int ret;
904
905 ret = kstrtou64(buf, 0, &featuremask);
906 if (ret)
907 return -EINVAL;
908
909 ret = amdgpu_pm_get_access(adev);
910 if (ret < 0)
911 return ret;
912
913 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
914
915 amdgpu_pm_put_access(adev);
916
917 if (ret)
918 return -EINVAL;
919
920 return count;
921 }
922
923 static ssize_t amdgpu_get_pp_features(struct device *dev,
924 struct device_attribute *attr,
925 char *buf)
926 {
927 struct drm_device *ddev = dev_get_drvdata(dev);
928 struct amdgpu_device *adev = drm_to_adev(ddev);
929 ssize_t size;
930 int ret;
931
932 ret = amdgpu_pm_get_access_if_active(adev);
933 if (ret)
934 return ret;
935
936 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
937 if (size <= 0)
938 size = sysfs_emit(buf, "\n");
939
940 amdgpu_pm_put_access(adev);
941
942 return size;
943 }
944
945 /**
946 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
947 *
948 * The amdgpu driver provides a sysfs API for adjusting what power levels
949 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
950 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
951 * this.
952 *
953 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
954 * Vega10 and later ASICs.
955 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
956 *
957 * Reading back the files will show you the available power levels within
958 * the power state and the clock information for those levels. If deep sleep is
959 * applied to a clock, the level will be denoted by a special level 'S:'
960 * E.g., ::
961 *
962 * S: 19Mhz *
963 * 0: 615Mhz
964 * 1: 800Mhz
965 * 2: 888Mhz
966 * 3: 1000Mhz
967 *
968 *
969 * To manually adjust these states, first select manual using
970 * power_dpm_force_performance_level.
971 * Secondly, enter a new value for each level by writing a space-separated
972 * list of the desired level indices to the pp_dpm_sclk/mclk/pcie file.
973 * E.g.,
974 *
975 * .. code-block:: bash
976 *
977 * echo "4 5 6" > pp_dpm_sclk
978 *
979 * will enable sclk levels 4, 5, and 6.
980 *
981 * NOTE: changing the dcefclk max dpm level is currently not supported.
982 */
983
984 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
985 enum pp_clock_type type,
986 char *buf)
987 {
988 struct drm_device *ddev = dev_get_drvdata(dev);
989 struct amdgpu_device *adev = drm_to_adev(ddev);
990 int size = 0;
991 int ret = 0;
992
993 ret = amdgpu_pm_get_access_if_active(adev);
994 if (ret)
995 return ret;
996
997 ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
998 if (ret) {
999 size = ret;
1000 goto out_pm_put;
1001 }
1002
1003 if (size == 0)
1004 size = sysfs_emit(buf, "\n");
1005
1006 out_pm_put:
1007 amdgpu_pm_put_access(adev);
1008
1009 return size;
1010 }
1011
1012 /*
1013 * Worst case: 32 bits individually specified, in octal at 12 characters
1014 * per line (+1 for \n).
1015 */
1016 #define AMDGPU_MASK_BUF_MAX (32 * 13)
1017
1018 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1019 {
1020 int ret;
1021 unsigned long level;
1022 char *sub_str = NULL;
1023 char *tmp;
1024 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1025 const char delimiter[3] = {' ', '\n', '\0'};
1026 size_t bytes;
1027
1028 *mask = 0;
1029
1030 bytes = min(count, sizeof(buf_cpy) - 1);
1031 memcpy(buf_cpy, buf, bytes);
1032 buf_cpy[bytes] = '\0';
1033 tmp = buf_cpy;
1034 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1035 if (strlen(sub_str)) {
1036 ret = kstrtoul(sub_str, 0, &level);
1037 if (ret || level > 31)
1038 return -EINVAL;
1039 *mask |= 1 << level;
1040 } else
1041 break;
1042 }
1043
1044 return 0;
1045 }
1046
1047 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
1048 enum pp_clock_type type,
1049 const char *buf,
1050 size_t count)
1051 {
1052 struct drm_device *ddev = dev_get_drvdata(dev);
1053 struct amdgpu_device *adev = drm_to_adev(ddev);
1054 int ret;
1055 uint32_t mask = 0;
1056
1057 ret = amdgpu_read_mask(buf, count, &mask);
1058 if (ret)
1059 return ret;
1060
1061 ret = amdgpu_pm_get_access(adev);
1062 if (ret < 0)
1063 return ret;
1064
1065 ret = amdgpu_dpm_force_clock_level(adev, type, mask);
1066
1067 amdgpu_pm_put_access(adev);
1068
1069 if (ret)
1070 return -EINVAL;
1071
1072 return count;
1073 }
1074
1075 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1076 struct device_attribute *attr,
1077 char *buf)
1078 {
1079 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
1080 }
1081
1082 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1083 struct device_attribute *attr,
1084 const char *buf,
1085 size_t count)
1086 {
1087 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
1088 }
1089
1090 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1091 struct device_attribute *attr,
1092 char *buf)
1093 {
1094 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
1095 }
1096
1097 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1098 struct device_attribute *attr,
1099 const char *buf,
1100 size_t count)
1101 {
1102 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
1103 }
1104
1105 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1106 struct device_attribute *attr,
1107 char *buf)
1108 {
1109 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
1110 }
1111
1112 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1113 struct device_attribute *attr,
1114 const char *buf,
1115 size_t count)
1116 {
1117 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
1118 }
1119
1120 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1121 struct device_attribute *attr,
1122 char *buf)
1123 {
1124 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
1125 }
1126
1127 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1128 struct device_attribute *attr,
1129 const char *buf,
1130 size_t count)
1131 {
1132 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
1133 }
1134
1135 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1136 struct device_attribute *attr,
1137 char *buf)
1138 {
1139 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
1140 }
1141
1142 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1143 struct device_attribute *attr,
1144 const char *buf,
1145 size_t count)
1146 {
1147 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
1148 }
1149
1150 static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
1151 struct device_attribute *attr,
1152 char *buf)
1153 {
1154 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
1155 }
1156
1157 static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
1158 struct device_attribute *attr,
1159 const char *buf,
1160 size_t count)
1161 {
1162 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
1163 }
1164
1165 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1166 struct device_attribute *attr,
1167 char *buf)
1168 {
1169 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
1170 }
1171
1172 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1173 struct device_attribute *attr,
1174 const char *buf,
1175 size_t count)
1176 {
1177 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
1178 }
1179
1180 static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
1181 struct device_attribute *attr,
1182 char *buf)
1183 {
1184 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
1185 }
1186
1187 static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
1188 struct device_attribute *attr,
1189 const char *buf,
1190 size_t count)
1191 {
1192 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
1193 }
1194
1195 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1196 struct device_attribute *attr,
1197 char *buf)
1198 {
1199 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
1200 }
1201
1202 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1203 struct device_attribute *attr,
1204 const char *buf,
1205 size_t count)
1206 {
1207 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
1208 }
1209
1210 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1211 struct device_attribute *attr,
1212 char *buf)
1213 {
1214 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
1215 }
1216
1217 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1218 struct device_attribute *attr,
1219 const char *buf,
1220 size_t count)
1221 {
1222 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
1223 }
1224
1225 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1226 struct device_attribute *attr,
1227 char *buf)
1228 {
1229 struct drm_device *ddev = dev_get_drvdata(dev);
1230 struct amdgpu_device *adev = drm_to_adev(ddev);
1231 uint32_t value = 0;
1232 int ret;
1233
1234 ret = amdgpu_pm_get_access_if_active(adev);
1235 if (ret)
1236 return ret;
1237
1238 value = amdgpu_dpm_get_sclk_od(adev);
1239
1240 amdgpu_pm_put_access(adev);
1241
1242 return sysfs_emit(buf, "%d\n", value);
1243 }
1244
1245 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1246 struct device_attribute *attr,
1247 const char *buf,
1248 size_t count)
1249 {
1250 struct drm_device *ddev = dev_get_drvdata(dev);
1251 struct amdgpu_device *adev = drm_to_adev(ddev);
1252 int ret;
1253 long int value;
1254
1255 ret = kstrtol(buf, 0, &value);
1256
1257 if (ret)
1258 return -EINVAL;
1259
1260 ret = amdgpu_pm_get_access(adev);
1261 if (ret < 0)
1262 return ret;
1263
1264 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1265
1266 amdgpu_pm_put_access(adev);
1267
1268 return count;
1269 }
1270
1271 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1272 struct device_attribute *attr,
1273 char *buf)
1274 {
1275 struct drm_device *ddev = dev_get_drvdata(dev);
1276 struct amdgpu_device *adev = drm_to_adev(ddev);
1277 uint32_t value = 0;
1278 int ret;
1279
1280 ret = amdgpu_pm_get_access_if_active(adev);
1281 if (ret)
1282 return ret;
1283
1284 value = amdgpu_dpm_get_mclk_od(adev);
1285
1286 amdgpu_pm_put_access(adev);
1287
1288 return sysfs_emit(buf, "%d\n", value);
1289 }
1290
1291 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1292 struct device_attribute *attr,
1293 const char *buf,
1294 size_t count)
1295 {
1296 struct drm_device *ddev = dev_get_drvdata(dev);
1297 struct amdgpu_device *adev = drm_to_adev(ddev);
1298 int ret;
1299 long int value;
1300
1301 ret = kstrtol(buf, 0, &value);
1302
1303 if (ret)
1304 return -EINVAL;
1305
1306 ret = amdgpu_pm_get_access(adev);
1307 if (ret < 0)
1308 return ret;
1309
1310 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1311
1312 amdgpu_pm_put_access(adev);
1313
1314 return count;
1315 }
1316
1317 /**
1318 * DOC: pp_power_profile_mode
1319 *
1320 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1321 * related to switching between power levels in a power state. The file
1322 * pp_power_profile_mode is used for this.
1323 *
1324 * Reading this file outputs a list of all of the predefined power profiles
1325 * and the relevant heuristics settings for that profile.
1326 *
1327 * To select a profile or create a custom profile, first select manual using
1328 * power_dpm_force_performance_level. Writing the number of a predefined
1329 * profile to pp_power_profile_mode will enable those heuristics. To
1330 * create a custom set of heuristics, write a string of numbers to the file
1331 * starting with the number of the custom profile along with a setting
1332 * for each heuristic parameter. Due to differences across asic families
1333 * the heuristic parameters vary from family to family. Additionally,
1334 * you can apply the custom heuristics to different clock domains. Each
1335 * clock domain is considered a distinct operation so if you modify the
1336 * gfxclk heuristics and then the memclk heuristics, all of the
1337 * custom heuristics will be retained until you switch to another profile.
1338 *
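 * A usage sketch (profile numbers and heuristic values differ per ASIC and
 * are placeholders here):
 *
 * .. code-block:: bash
 *
 *	echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	# select a predefined profile by its number
 *	echo "1" > /sys/class/drm/card0/device/pp_power_profile_mode
 *	# custom profile: the custom profile number followed by heuristic values
 *	echo "6 70 60 1 2" > /sys/class/drm/card0/device/pp_power_profile_mode
 *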
1339 */
1340
1341 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1342 struct device_attribute *attr,
1343 char *buf)
1344 {
1345 struct drm_device *ddev = dev_get_drvdata(dev);
1346 struct amdgpu_device *adev = drm_to_adev(ddev);
1347 ssize_t size;
1348 int ret;
1349
1350 ret = amdgpu_pm_get_access_if_active(adev);
1351 if (ret)
1352 return ret;
1353
1354 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1355 if (size <= 0)
1356 size = sysfs_emit(buf, "\n");
1357
1358 amdgpu_pm_put_access(adev);
1359
1360 return size;
1361 }
1362
1363
1364 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1365 struct device_attribute *attr,
1366 const char *buf,
1367 size_t count)
1368 {
1369 int ret;
1370 struct drm_device *ddev = dev_get_drvdata(dev);
1371 struct amdgpu_device *adev = drm_to_adev(ddev);
1372 uint32_t parameter_size = 0;
1373 long parameter[64];
1374 char *sub_str, buf_cpy[128];
1375 char *tmp_str;
1376 uint32_t i = 0;
1377 char tmp[2];
1378 long int profile_mode = 0;
1379 const char delimiter[3] = {' ', '\n', '\0'};
1380
1381 tmp[0] = *(buf);
1382 tmp[1] = '\0';
1383 ret = kstrtol(tmp, 0, &profile_mode);
1384 if (ret)
1385 return -EINVAL;
1386
1387 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1388 if (count < 2 || count > 127)
1389 return -EINVAL;
1390 while (isspace(*++buf))
1391 i++;
1392 memcpy(buf_cpy, buf, count-i);
1393 tmp_str = buf_cpy;
1394 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1395 if (strlen(sub_str) == 0)
1396 continue;
1397 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1398 if (ret)
1399 return -EINVAL;
1400 parameter_size++;
1401 if (!tmp_str)
1402 break;
1403 while (isspace(*tmp_str))
1404 tmp_str++;
1405 }
1406 }
1407 parameter[parameter_size] = profile_mode;
1408
1409 ret = amdgpu_pm_get_access(adev);
1410 if (ret < 0)
1411 return ret;
1412
1413 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1414
1415 amdgpu_pm_put_access(adev);
1416
1417 if (!ret)
1418 return count;
1419
1420 return -EINVAL;
1421 }
1422
1423 static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
1424 enum amd_pp_sensors sensor,
1425 void *query)
1426 {
1427 int r, size = sizeof(uint32_t);
1428
1429 r = amdgpu_pm_get_access_if_active(adev);
1430 if (r)
1431 return r;
1432
1433 /* get the sensor value */
1434 r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);
1435
1436 amdgpu_pm_put_access(adev);
1437
1438 return r;
1439 }
1440
1441 /**
1442 * DOC: gpu_busy_percent
1443 *
1444 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1445 * is as a percentage. The file gpu_busy_percent is used for this.
1446 * The SMU firmware computes a percentage of load based on the
1447 * aggregate activity level in the IP cores.
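 *
 * For example (the card index varies per system), the load can be polled
 * from a shell with:
 *
 * .. code-block:: bash
 *
 *	watch -n 1 cat /sys/class/drm/card0/device/gpu_busy_percent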
1448 */
1449 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1450 struct device_attribute *attr,
1451 char *buf)
1452 {
1453 struct drm_device *ddev = dev_get_drvdata(dev);
1454 struct amdgpu_device *adev = drm_to_adev(ddev);
1455 unsigned int value;
1456 int r;
1457
1458 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
1459 if (r)
1460 return r;
1461
1462 return sysfs_emit(buf, "%d\n", value);
1463 }
1464
1465 /**
1466 * DOC: mem_busy_percent
1467 *
1468 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1469 * is as a percentage. The file mem_busy_percent is used for this.
1470 * The SMU firmware computes a percentage of load based on the
1471 * aggregate activity level in the IP cores.
1472 */
1473 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1474 struct device_attribute *attr,
1475 char *buf)
1476 {
1477 struct drm_device *ddev = dev_get_drvdata(dev);
1478 struct amdgpu_device *adev = drm_to_adev(ddev);
1479 unsigned int value;
1480 int r;
1481
1482 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
1483 if (r)
1484 return r;
1485
1486 return sysfs_emit(buf, "%d\n", value);
1487 }
1488
1489 /**
1490 * DOC: vcn_busy_percent
1491 *
1492 * The amdgpu driver provides a sysfs API for reading how busy the VCN
1493 * is as a percentage. The file vcn_busy_percent is used for this.
1494 * The SMU firmware computes a percentage of load based on the
1495 * aggregate activity level in the IP cores.
1496 */
1497 static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
1498 struct device_attribute *attr,
1499 char *buf)
1500 {
1501 struct drm_device *ddev = dev_get_drvdata(dev);
1502 struct amdgpu_device *adev = drm_to_adev(ddev);
1503 unsigned int value;
1504 int r;
1505
1506 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
1507 if (r)
1508 return r;
1509
1510 return sysfs_emit(buf, "%d\n", value);
1511 }
1512
1513 /**
1514 * DOC: pcie_bw
1515 *
1516 * The amdgpu driver provides a sysfs API for estimating how much data
1517 * has been received and sent by the GPU in the last second through PCIe.
1518 * The file pcie_bw is used for this.
1519 * The Perf counters count the number of received and sent messages and return
1520 * those values, as well as the maximum payload size of a PCIe packet (mps).
1521 * Note that it is not possible to easily and quickly obtain the size of each
1522 * packet transmitted, so we output the max payload size (mps) to allow for
1523 * quick estimation of the PCIe bandwidth usage.
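 *
 * As a rough, illustrative estimate: if reading the file returns
 * "100000 200000 256" (100000 received and 200000 sent messages with a
 * 256-byte mps), an upper bound on the bandwidth used over that second is
 * (100000 + 200000) * 256 bytes = ~76.8 MB/s.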
1524 */
1525 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1526 struct device_attribute *attr,
1527 char *buf)
1528 {
1529 struct drm_device *ddev = dev_get_drvdata(dev);
1530 struct amdgpu_device *adev = drm_to_adev(ddev);
1531 uint64_t count0 = 0, count1 = 0;
1532 int ret;
1533
1534 if (adev->flags & AMD_IS_APU)
1535 return -ENODATA;
1536
1537 if (!adev->asic_funcs->get_pcie_usage)
1538 return -ENODATA;
1539
1540 ret = amdgpu_pm_get_access_if_active(adev);
1541 if (ret)
1542 return ret;
1543
1544 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1545
1546 amdgpu_pm_put_access(adev);
1547
1548 return sysfs_emit(buf, "%llu %llu %i\n",
1549 count0, count1, pcie_get_mps(adev->pdev));
1550 }
1551
1552 /**
1553 * DOC: unique_id
1554 *
1555 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1556 * The file unique_id is used for this.
1557 * This will provide a unique ID that will persist from machine to machine.
1558 *
1559 * NOTE: This will only work for GFX9 and newer. This file will be absent
1560 * on unsupported ASICs (GFX8 and older)
1561 */
1562 static ssize_t amdgpu_get_unique_id(struct device *dev,
1563 struct device_attribute *attr,
1564 char *buf)
1565 {
1566 struct drm_device *ddev = dev_get_drvdata(dev);
1567 struct amdgpu_device *adev = drm_to_adev(ddev);
1568
1569 if (adev->unique_id)
1570 return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1571
1572 return 0;
1573 }
1574
1575 /**
1576 * DOC: thermal_throttling_logging
1577 *
1578 * Thermal throttling pulls down the clock frequency and thus the performance.
1579 * It's a useful mechanism to protect the chip from overheating. Since it
1580 * impacts performance, the user controls whether it is enabled and if so,
1581 * the log frequency.
1582 *
1583 * Reading back the file shows you the status (enabled or disabled) and
1584 * the interval (in seconds) between each thermal logging event.
1585 *
1586 * Writing an integer to the file sets a new logging interval in seconds.
1587 * The value should be between 1 and 3600. If the value is less than 1,
1588 * thermal logging is disabled. Values greater than 3600 are ignored.
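 *
 * For example (the card index varies per system):
 *
 * .. code-block:: bash
 *
 *	# log throttling events at most once every 60 seconds
 *	echo "60" > /sys/class/drm/card0/device/thermal_throttling_logging
 *	# disable thermal throttling logging
 *	echo "0" > /sys/class/drm/card0/device/thermal_throttling_logging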
1589 */
1590 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1591 struct device_attribute *attr,
1592 char *buf)
1593 {
1594 struct drm_device *ddev = dev_get_drvdata(dev);
1595 struct amdgpu_device *adev = drm_to_adev(ddev);
1596
1597 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1598 adev_to_drm(adev)->unique,
1599 str_enabled_disabled(atomic_read(&adev->throttling_logging_enabled)),
1600 adev->throttling_logging_rs.interval / HZ + 1);
1601 }
1602
1603 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1604 struct device_attribute *attr,
1605 const char *buf,
1606 size_t count)
1607 {
1608 struct drm_device *ddev = dev_get_drvdata(dev);
1609 struct amdgpu_device *adev = drm_to_adev(ddev);
1610 long throttling_logging_interval;
1611 int ret = 0;
1612
1613 ret = kstrtol(buf, 0, &throttling_logging_interval);
1614 if (ret)
1615 return ret;
1616
1617 if (throttling_logging_interval > 3600)
1618 return -EINVAL;
1619
1620 if (throttling_logging_interval > 0) {
1621 /*
1622 * Reset the ratelimit timer internals.
1623 * This can effectively restart the timer.
1624 */
1625 ratelimit_state_reset_interval(&adev->throttling_logging_rs,
1626 (throttling_logging_interval - 1) * HZ);
1627 atomic_set(&adev->throttling_logging_enabled, 1);
1628 } else {
1629 atomic_set(&adev->throttling_logging_enabled, 0);
1630 }
1631
1632 return count;
1633 }
1634
1635 /**
1636 * DOC: apu_thermal_cap
1637 *
1638 * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
1639 * limit temperature in millidegrees Celsius.
1640 *
1641 * Reading back the file shows you the core limit value.
1642 *
1643 * Writing an integer to the file sets a new thermal limit. The value
1644 * should be between 0 and 100. If the value is less than 0 or greater
1645 * than 100, then the write request will be ignored.
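 *
 * For example (the value is illustrative; the card index varies per system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/apu_thermal_cap
 *	echo "95" > /sys/class/drm/card0/device/apu_thermal_cap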
1646 */
1647 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1648 struct device_attribute *attr,
1649 char *buf)
1650 {
1651 int ret, size;
1652 u32 limit;
1653 struct drm_device *ddev = dev_get_drvdata(dev);
1654 struct amdgpu_device *adev = drm_to_adev(ddev);
1655
1656 ret = amdgpu_pm_get_access_if_active(adev);
1657 if (ret)
1658 return ret;
1659
1660 ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1661 if (!ret)
1662 size = sysfs_emit(buf, "%u\n", limit);
1663 else
1664 size = sysfs_emit(buf, "failed to get thermal limit\n");
1665
1666 amdgpu_pm_put_access(adev);
1667
1668 return size;
1669 }
1670
1671 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1672 struct device_attribute *attr,
1673 const char *buf,
1674 size_t count)
1675 {
1676 int ret;
1677 u32 value;
1678 struct drm_device *ddev = dev_get_drvdata(dev);
1679 struct amdgpu_device *adev = drm_to_adev(ddev);
1680
1681 ret = kstrtou32(buf, 10, &value);
1682 if (ret)
1683 return ret;
1684
1685 if (value > 100) {
1686 dev_err(dev, "Invalid argument !\n");
1687 return -EINVAL;
1688 }
1689
1690 ret = amdgpu_pm_get_access(adev);
1691 if (ret < 0)
1692 return ret;
1693
1694 ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1695 if (ret) {
1696 amdgpu_pm_put_access(adev);
1697 dev_err(dev, "failed to update thermal limit\n");
1698 return ret;
1699 }
1700
1701 amdgpu_pm_put_access(adev);
1702
1703 return count;
1704 }
1705
1706 static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
1707 struct amdgpu_device_attr *attr,
1708 uint32_t mask,
1709 enum amdgpu_device_attr_states *states)
1710 {
1711 if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
1712 *states = ATTR_STATE_UNSUPPORTED;
1713
1714 return 0;
1715 }
1716
1717 static ssize_t amdgpu_get_pm_metrics(struct device *dev,
1718 struct device_attribute *attr, char *buf)
1719 {
1720 struct drm_device *ddev = dev_get_drvdata(dev);
1721 struct amdgpu_device *adev = drm_to_adev(ddev);
1722 ssize_t size = 0;
1723 int ret;
1724
1725 ret = amdgpu_pm_get_access_if_active(adev);
1726 if (ret)
1727 return ret;
1728
1729 size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
1730
1731 amdgpu_pm_put_access(adev);
1732
1733 return size;
1734 }
1735
1736 /**
1737 * DOC: gpu_metrics
1738 *
1739 * The amdgpu driver provides a sysfs API for retrieving current gpu
1740 * metrics data. The file gpu_metrics is used for this. Reading the
1741 * file will dump all the current gpu metrics data.
1742 *
1743 * These data include temperature, frequency, engine utilization,
1744 * power consumption, throttler status, fan speed and cpu core statistics
1745 * (available for APU only). That is, it will give a snapshot of all sensors
1746 * at the same time.
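 *
 * The contents are a binary metrics structure that is normally consumed by
 * tools; as a quick illustration it can be dumped with:
 *
 * .. code-block:: bash
 *
 *	hexdump -C /sys/class/drm/card0/device/gpu_metrics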
1747 */
1748 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1749 struct device_attribute *attr,
1750 char *buf)
1751 {
1752 struct drm_device *ddev = dev_get_drvdata(dev);
1753 struct amdgpu_device *adev = drm_to_adev(ddev);
1754 void *gpu_metrics;
1755 ssize_t size = 0;
1756 int ret;
1757
1758 ret = amdgpu_pm_get_access_if_active(adev);
1759 if (ret)
1760 return ret;
1761
1762 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1763 if (size <= 0)
1764 goto out;
1765
1766 if (size >= PAGE_SIZE)
1767 size = PAGE_SIZE - 1;
1768
1769 memcpy(buf, gpu_metrics, size);
1770
1771 out:
1772 amdgpu_pm_put_access(adev);
1773
1774 return size;
1775 }
1776
1777 static int amdgpu_show_powershift_percent(struct device *dev,
1778 char *buf, enum amd_pp_sensors sensor)
1779 {
1780 struct drm_device *ddev = dev_get_drvdata(dev);
1781 struct amdgpu_device *adev = drm_to_adev(ddev);
1782 uint32_t ss_power;
1783 int r = 0, i;
1784
1785 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1786 if (r == -EOPNOTSUPP) {
1787 /* sensor not available on dGPU, try to read from APU */
1788 adev = NULL;
1789 mutex_lock(&mgpu_info.mutex);
1790 for (i = 0; i < mgpu_info.num_gpu; i++) {
1791 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1792 adev = mgpu_info.gpu_ins[i].adev;
1793 break;
1794 }
1795 }
1796 mutex_unlock(&mgpu_info.mutex);
1797 if (adev)
1798 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1799 }
1800
1801 if (r)
1802 return r;
1803
1804 return sysfs_emit(buf, "%u%%\n", ss_power);
1805 }
1806
1807 /**
1808 * DOC: smartshift_apu_power
1809 *
1810 * The amdgpu driver provides a sysfs API for reporting the APU power
1811 * shift in percentage if the platform supports SmartShift. A value of 0 means
1812 * there is no power shift, and values in the range [1-100] mean that power
1813 * is shifted to the APU; the boost percentage is relative to the APU power
1814 * limit on the platform.
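 *
 * As an illustration (the device path and the value shown are examples), a
 * read returns the current shift percentage:
 *
 * .. code-block:: console
 *
 *      $ cat /sys/bus/pci/devices/.../smartshift_apu_power
 *      20%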
1815 */
1816
1817 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1818 char *buf)
1819 {
1820 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1821 }
1822
1823 /**
1824 * DOC: smartshift_dgpu_power
1825 *
1826 * The amdgpu driver provides a sysfs API for reporting the dGPU power
1827 * shift in percentage if the platform supports SmartShift. A value of 0 means
1828 * there is no power shift, and values in the range [1-100] mean that power is
1829 * shifted to the dGPU; the boost percentage is relative to the dGPU power
1830 * limit on the platform.
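 *
 * As an illustration (the device path and the value shown are examples):
 *
 * .. code-block:: console
 *
 *      $ cat /sys/bus/pci/devices/.../smartshift_dgpu_power
 *      0%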
1831 */
1832
1833 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1834 char *buf)
1835 {
1836 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1837 }
1838
1839 /**
1840 * DOC: smartshift_bias
1841 *
1842 * The amdgpu driver provides a sysfs API for reporting and setting the
1843 * SmartShift (SS2.0) bias level. The value ranges from -100 to 100
1844 * and the default is 0. -100 sets maximum preference to the APU
1845 * and 100 sets maximum preference to the dGPU.
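 *
 * As an illustration (the device path and the value are examples), the bias
 * can be read and adjusted through the same file:
 *
 * .. code-block:: console
 *
 *      $ cat /sys/bus/pci/devices/.../smartshift_bias
 *      0
 *      $ echo 50 > /sys/bus/pci/devices/.../smartshift_bias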
1846 */
1847
1848 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1849 struct device_attribute *attr,
1850 char *buf)
1851 {
1852 int r = 0;
1853
1854 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1855
1856 return r;
1857 }
1858
1859 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1860 struct device_attribute *attr,
1861 const char *buf, size_t count)
1862 {
1863 struct drm_device *ddev = dev_get_drvdata(dev);
1864 struct amdgpu_device *adev = drm_to_adev(ddev);
1865 int r = 0;
1866 int bias = 0;
1867
1868 r = kstrtoint(buf, 10, &bias);
1869 if (r)
1870 return r;
1871
1872 r = amdgpu_pm_get_access(adev);
1873 if (r < 0)
1874 return r;
1875
1876 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1877 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1878 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1879 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1880
1881 amdgpu_smartshift_bias = bias;
1882 r = count;
1883
1884 /* TODO: update bias level with SMU message */
1885
1886 out:
1887 amdgpu_pm_put_access(adev);
1888
1889 return r;
1890 }
1891
1892 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1893 uint32_t mask, enum amdgpu_device_attr_states *states)
1894 {
1895 if (!amdgpu_device_supports_smart_shift(adev))
1896 *states = ATTR_STATE_UNSUPPORTED;
1897
1898 return 0;
1899 }
1900
1901 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1902 uint32_t mask, enum amdgpu_device_attr_states *states)
1903 {
1904 uint32_t ss_power;
1905
1906 if (!amdgpu_device_supports_smart_shift(adev))
1907 *states = ATTR_STATE_UNSUPPORTED;
1908 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1909 (void *)&ss_power))
1910 *states = ATTR_STATE_UNSUPPORTED;
1911 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1912 (void *)&ss_power))
1913 *states = ATTR_STATE_UNSUPPORTED;
1914
1915 return 0;
1916 }
1917
1918 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1919 uint32_t mask, enum amdgpu_device_attr_states *states)
1920 {
1921 *states = ATTR_STATE_SUPPORTED;
1922
1923 if (!amdgpu_dpm_is_overdrive_supported(adev)) {
1924 *states = ATTR_STATE_UNSUPPORTED;
1925 return 0;
1926 }
1927
1928 /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0, 12.1.0 SRIOV/BM support */
1929 if (amdgpu_is_multi_aid(adev)) {
1930 if (amdgpu_sriov_multi_vf_mode(adev))
1931 *states = ATTR_STATE_UNSUPPORTED;
1932 return 0;
1933 }
1934
1935 if (!(attr->flags & mask))
1936 *states = ATTR_STATE_UNSUPPORTED;
1937
1938 return 0;
1939 }
1940
1941 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1942 uint32_t mask, enum amdgpu_device_attr_states *states)
1943 {
1944 struct device_attribute *dev_attr = &attr->dev_attr;
1945 uint32_t gc_ver;
1946
1947 *states = ATTR_STATE_SUPPORTED;
1948
1949 if (!(attr->flags & mask)) {
1950 *states = ATTR_STATE_UNSUPPORTED;
1951 return 0;
1952 }
1953
1954 gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1955 /* dcefclk node is not available on gfx 11.0.3 sriov */
1956 if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
1957 gc_ver < IP_VERSION(9, 0, 0) ||
1958 !amdgpu_device_has_display_hardware(adev))
1959 *states = ATTR_STATE_UNSUPPORTED;
1960
1961 /* SMU MP1 does not support dcefclk level setting.
1962 * In addition, setting should not be allowed from a VF unless it is in one-VF mode.
1963 */
1964 if (gc_ver >= IP_VERSION(10, 0, 0) ||
1965 (amdgpu_sriov_multi_vf_mode(adev))) {
1966 dev_attr->attr.mode &= ~S_IWUGO;
1967 dev_attr->store = NULL;
1968 }
1969
1970 return 0;
1971 }
1972
1973 static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1974 uint32_t mask, enum amdgpu_device_attr_states *states)
1975 {
1976 struct device_attribute *dev_attr = &attr->dev_attr;
1977 enum amdgpu_device_attr_id attr_id = attr->attr_id;
1978 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
1979 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1980
1981 *states = ATTR_STATE_SUPPORTED;
1982
1983 if (!(attr->flags & mask)) {
1984 *states = ATTR_STATE_UNSUPPORTED;
1985 return 0;
1986 }
1987
1988 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1989 if (gc_ver < IP_VERSION(9, 0, 0))
1990 *states = ATTR_STATE_UNSUPPORTED;
1991 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1992 if (mp1_ver < IP_VERSION(10, 0, 0))
1993 *states = ATTR_STATE_UNSUPPORTED;
1994 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
1995 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
1996 gc_ver == IP_VERSION(10, 3, 3) ||
1997 gc_ver == IP_VERSION(10, 3, 6) ||
1998 gc_ver == IP_VERSION(10, 3, 7) ||
1999 gc_ver == IP_VERSION(10, 3, 0) ||
2000 gc_ver == IP_VERSION(10, 1, 2) ||
2001 gc_ver == IP_VERSION(11, 0, 0) ||
2002 gc_ver == IP_VERSION(11, 0, 1) ||
2003 gc_ver == IP_VERSION(11, 0, 4) ||
2004 gc_ver == IP_VERSION(11, 5, 0) ||
2005 gc_ver == IP_VERSION(11, 0, 2) ||
2006 gc_ver == IP_VERSION(11, 0, 3) ||
2007 amdgpu_is_multi_aid(adev)))
2008 *states = ATTR_STATE_UNSUPPORTED;
2009 } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2010 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2011 gc_ver == IP_VERSION(10, 3, 0) ||
2012 gc_ver == IP_VERSION(11, 0, 2) ||
2013 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2014 *states = ATTR_STATE_UNSUPPORTED;
2015 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2016 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2017 gc_ver == IP_VERSION(10, 3, 3) ||
2018 gc_ver == IP_VERSION(10, 3, 6) ||
2019 gc_ver == IP_VERSION(10, 3, 7) ||
2020 gc_ver == IP_VERSION(10, 3, 0) ||
2021 gc_ver == IP_VERSION(10, 1, 2) ||
2022 gc_ver == IP_VERSION(11, 0, 0) ||
2023 gc_ver == IP_VERSION(11, 0, 1) ||
2024 gc_ver == IP_VERSION(11, 0, 4) ||
2025 gc_ver == IP_VERSION(11, 5, 0) ||
2026 gc_ver == IP_VERSION(11, 0, 2) ||
2027 gc_ver == IP_VERSION(11, 0, 3) ||
2028 amdgpu_is_multi_aid(adev)))
2029 *states = ATTR_STATE_UNSUPPORTED;
2030 } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2031 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2032 gc_ver == IP_VERSION(10, 3, 0) ||
2033 gc_ver == IP_VERSION(11, 0, 2) ||
2034 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2035 *states = ATTR_STATE_UNSUPPORTED;
2036 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2037 if (gc_ver == IP_VERSION(9, 4, 2) ||
2038 amdgpu_is_multi_aid(adev))
2039 *states = ATTR_STATE_UNSUPPORTED;
2040 }
2041
2042 switch (gc_ver) {
2043 case IP_VERSION(9, 4, 1):
2044 case IP_VERSION(9, 4, 2):
2045 /* MI series cards do not support standalone mclk/socclk/fclk level setting */
2046 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2047 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2048 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2049 dev_attr->attr.mode &= ~S_IWUGO;
2050 dev_attr->store = NULL;
2051 }
2052 break;
2053 default:
2054 break;
2055 }
2056
2057 /* setting should not be allowed from VF if not in one VF mode */
2058 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
2059 dev_attr->attr.mode &= ~S_IWUGO;
2060 dev_attr->store = NULL;
2061 }
2062
2063 return 0;
2064 }
2065
2066 /**
2067 * DOC: board
2068 *
2069 * Certain SOCs can support reporting of various board attributes. This is useful
2070 * for user applications to monitor board related attributes.
2071 *
2072 * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
2073 * nine types of attributes are reported. Baseboard temperature and
2074 * GPU board temperature are reported as binary files. NPM status, current node power limit,
2075 * max node power limit, node power, global PPT residency, baseboard_power and baseboard_power_limit
2076 * are reported as ASCII text files.
2077 *
2078 * .. code-block:: console
2079 *
2080 * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
2081 *
2082 * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
2083 *
2084 * hexdump /sys/bus/pci/devices/.../board/npm_status
2085 *
2086 * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
2087 *
2088 * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
2089 *
2090 * hexdump /sys/bus/pci/devices/.../board/node_power
2091 *
2092 * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
2093 *
2094 * hexdump /sys/bus/pci/devices/.../board/baseboard_power
2095 *
2096 * hexdump /sys/bus/pci/devices/.../board/baseboard_power_limit
2097 */
2098
2099 /**
2100 * DOC: baseboard_temp
2101 *
2102 * The amdgpu driver provides a sysfs API for retrieving current baseboard
2103 * temperature metrics data. The file baseboard_temp is used for this.
2104 * Reading the file will dump all the current baseboard temperature metrics data.
2105 */
2106 static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
2107 struct device_attribute *attr, char *buf)
2108 {
2109 struct drm_device *ddev = dev_get_drvdata(dev);
2110 struct amdgpu_device *adev = drm_to_adev(ddev);
2111 ssize_t size;
2112 int ret;
2113
2114 ret = amdgpu_pm_get_access_if_active(adev);
2115 if (ret)
2116 return ret;
2117
2118 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
2119 if (size <= 0)
2120 goto out;
2121 if (size >= PAGE_SIZE) {
2122 ret = -ENOSPC;
2123 goto out;
2124 }
2125
2126 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
2127
2128 out:
2129 amdgpu_pm_put_access(adev);
2130
2131 if (ret)
2132 return ret;
2133
2134 return size;
2135 }
2136
2137 /**
2138 * DOC: gpuboard_temp
2139 *
2140 * The amdgpu driver provides a sysfs API for retrieving current gpuboard
2141 * temperature metrics data. The file gpuboard_temp is used for this.
2142 * Reading the file will dump all the current gpuboard temperature metrics data.
2143 */
2144 static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
2145 struct device_attribute *attr, char *buf)
2146 {
2147 struct drm_device *ddev = dev_get_drvdata(dev);
2148 struct amdgpu_device *adev = drm_to_adev(ddev);
2149 ssize_t size;
2150 int ret;
2151
2152 ret = amdgpu_pm_get_access_if_active(adev);
2153 if (ret)
2154 return ret;
2155
2156 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
2157 if (size <= 0)
2158 goto out;
2159 if (size >= PAGE_SIZE) {
2160 ret = -ENOSPC;
2161 goto out;
2162 }
2163
2164 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
2165
2166 out:
2167 amdgpu_pm_put_access(adev);
2168
2169 if (ret)
2170 return ret;
2171
2172 return size;
2173 }
2174
2175 /**
2176 * DOC: cur_node_power_limit
2177 *
2178 * The amdgpu driver provides a sysfs API for retrieving current node power limit.
2179 * The file cur_node_power_limit is used for this.
2180 */
2181 static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
2182 struct device_attribute *attr, char *buf)
2183 {
2184 struct drm_device *ddev = dev_get_drvdata(dev);
2185 struct amdgpu_device *adev = drm_to_adev(ddev);
2186 u32 nplimit;
2187 int r;
2188
2189 /* get the current node power limit */
2190 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
2191 (void *)&nplimit);
2192 if (r)
2193 return r;
2194
2195 return sysfs_emit(buf, "%u\n", nplimit);
2196 }
2197
2198 /**
2199 * DOC: node_power
2200 *
2201 * The amdgpu driver provides a sysfs API for retrieving current node power.
2202 * The file node_power is used for this.
2203 */
2204 static ssize_t amdgpu_show_node_power(struct device *dev,
2205 struct device_attribute *attr, char *buf)
2206 {
2207 struct drm_device *ddev = dev_get_drvdata(dev);
2208 struct amdgpu_device *adev = drm_to_adev(ddev);
2209 u32 npower;
2210 int r;
2211
2212 /* get the node power */
2213 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2214 (void *)&npower);
2215 if (r)
2216 return r;
2217
2218 return sysfs_emit(buf, "%u\n", npower);
2219 }
2220
2221 /**
2222 * DOC: npm_status
2223 *
2224 * The amdgpu driver provides a sysfs API for retrieving current node power management status.
2225 * The file npm_status is used for this. It shows the status as enabled or disabled based on
2226 * the current node power value. If the node power is zero, the status is disabled; otherwise it is enabled.
2227 */
2228 static ssize_t amdgpu_show_npm_status(struct device *dev,
2229 struct device_attribute *attr, char *buf)
2230 {
2231 struct drm_device *ddev = dev_get_drvdata(dev);
2232 struct amdgpu_device *adev = drm_to_adev(ddev);
2233 u32 npower;
2234 int r;
2235
2236 /* get the node power */
2237 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2238 (void *)&npower);
2239 if (r)
2240 return r;
2241
2242 return sysfs_emit(buf, "%s\n", str_enabled_disabled(npower));
2243 }
2244
2245 /**
2246 * DOC: global_ppt_resid
2247 *
2248 * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
2249 * The file global_ppt_resid is used for this.
2250 */
2251 static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
2252 struct device_attribute *attr, char *buf)
2253 {
2254 struct drm_device *ddev = dev_get_drvdata(dev);
2255 struct amdgpu_device *adev = drm_to_adev(ddev);
2256 u32 gpptresid;
2257 int r;
2258
2259 /* get the global ppt residency */
2260 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
2261 (void *)&gpptresid);
2262 if (r)
2263 return r;
2264
2265 return sysfs_emit(buf, "%u\n", gpptresid);
2266 }
2267
2268 /**
2269 * DOC: max_node_power_limit
2270 *
2271 * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
2272 * The file max_node_power_limit is used for this.
2273 */
2274 static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
2275 struct device_attribute *attr, char *buf)
2276 {
2277 struct drm_device *ddev = dev_get_drvdata(dev);
2278 struct amdgpu_device *adev = drm_to_adev(ddev);
2279 u32 max_nplimit;
2280 int r;
2281
2282 /* get the max node power limit */
2283 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
2284 (void *)&max_nplimit);
2285 if (r)
2286 return r;
2287
2288 return sysfs_emit(buf, "%u\n", max_nplimit);
2289 }
2290
2291 /**
2292 * DOC: baseboard_power
2293 *
2294 * The amdgpu driver provides a sysfs API for retrieving the current baseboard (UBB) power in watts.
2295 * The file baseboard_power is used for this.
2296 */
2297 static ssize_t amdgpu_show_baseboard_power(struct device *dev,
2298 struct device_attribute *attr, char *buf)
2299 {
2300 struct drm_device *ddev = dev_get_drvdata(dev);
2301 struct amdgpu_device *adev = drm_to_adev(ddev);
2302 u32 ubbpower;
2303 int r;
2304
2305 /* get the ubb power */
2306 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER,
2307 (void *)&ubbpower);
2308 if (r)
2309 return r;
2310
2311 return sysfs_emit(buf, "%u\n", ubbpower);
2312 }
2313
2314 /**
2315 * DOC: baseboard_power_limit
2316 *
2317 * The amdgpu driver provides a sysfs API for retrieving the baseboard (UBB) power limit in watts.
2318 * The file baseboard_power_limit is used for this.
2319 */
2320 static ssize_t amdgpu_show_baseboard_power_limit(struct device *dev,
2321 struct device_attribute *attr, char *buf)
2322 {
2323 struct drm_device *ddev = dev_get_drvdata(dev);
2324 struct amdgpu_device *adev = drm_to_adev(ddev);
2325 u32 ubbpowerlimit;
2326 int r;
2327
2328 /* get the ubb power limit */
2329 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT,
2330 (void *)&ubbpowerlimit);
2331 if (r)
2332 return r;
2333
2334 return sysfs_emit(buf, "%u\n", ubbpowerlimit);
2335 }
2336
2337 static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
2338 static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
2339 static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
2340 static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
2341 static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
2342 static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
2343 static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
2344 static DEVICE_ATTR(baseboard_power, 0444, amdgpu_show_baseboard_power, NULL);
2345 static DEVICE_ATTR(baseboard_power_limit, 0444, amdgpu_show_baseboard_power_limit, NULL);
2346
2347 static struct attribute *board_attrs[] = {
2348 &dev_attr_baseboard_temp.attr,
2349 &dev_attr_gpuboard_temp.attr,
2350 NULL
2351 };
2352
2353 static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
2354 {
2355 struct device *dev = kobj_to_dev(kobj);
2356 struct drm_device *ddev = dev_get_drvdata(dev);
2357 struct amdgpu_device *adev = drm_to_adev(ddev);
2358
2359 if (attr == &dev_attr_baseboard_temp.attr) {
2360 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
2361 return 0;
2362 }
2363
2364 if (attr == &dev_attr_gpuboard_temp.attr) {
2365 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
2366 return 0;
2367 }
2368
2369 return attr->mode;
2370 }
2371
2372 const struct attribute_group amdgpu_board_attr_group = {
2373 .name = "board",
2374 .attrs = board_attrs,
2375 .is_visible = amdgpu_board_attr_visible,
2376 };
2377
2378 /* pm policy attributes */
2379 struct amdgpu_pm_policy_attr {
2380 struct device_attribute dev_attr;
2381 enum pp_pm_policy id;
2382 };
2383
2384 /**
2385 * DOC: pm_policy
2386 *
2387 * Certain SOCs can support different power policies to optimize application
2388 * performance. However, the policy is applied only at the SOC level and not at a
2389 * per-process level. This is especially useful when the entire SOC is utilized for a
2390 * dedicated workload.
2391 *
2392 * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2393 * only two types of policies are supported through this interface.
2394 *
2395 * Pstate Policy Selection - This is to select different Pstate profiles which
2396 * decides clock/throttling preferences.
2397 *
2398 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2399 * this helps to select policy to be applied for per link power down.
2400 *
2401 * The list of available policies and policy levels varies between SOCs. They can
2402 * be viewed under the pm_policy directory. If the SOC doesn't support any policy,
2403 * this directory won't be available. The different policies supported will be
2404 * available as separate nodes under pm_policy.
2405 *
2406 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2407 *
2408 * Reading the policy file shows the different levels supported. The level which
2409 * is applied presently is denoted by * (asterisk). E.g.,
2410 *
2411 * .. code-block:: console
2412 *
2413 * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2414 * 0 : soc_pstate_default
2415 * 1 : soc_pstate_0
2416 * 2 : soc_pstate_1*
2417 * 3 : soc_pstate_2
2418 *
2419 * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2420 * 0 : plpd_disallow
2421 * 1 : plpd_default
2422 * 2 : plpd_optimized*
2423 *
2424 * To apply a specific policy
2425 *
2426 * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
2427 *
2428 * For the levels listed in the example above, to select "plpd_optimized" for
2429 * XGMI and "soc_pstate_2" for soc pstate policy -
2430 *
2431 * .. code-block:: console
2432 *
2433 * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2434 * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2435 *
2436 */
2437 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2438 struct device_attribute *attr,
2439 char *buf)
2440 {
2441 struct drm_device *ddev = dev_get_drvdata(dev);
2442 struct amdgpu_device *adev = drm_to_adev(ddev);
2443 struct amdgpu_pm_policy_attr *policy_attr;
2444
2445 policy_attr =
2446 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2447
2448 return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2449 }
2450
2451 static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
2452 struct device_attribute *attr,
2453 const char *buf, size_t count)
2454 {
2455 struct drm_device *ddev = dev_get_drvdata(dev);
2456 struct amdgpu_device *adev = drm_to_adev(ddev);
2457 struct amdgpu_pm_policy_attr *policy_attr;
2458 int ret, num_params = 0;
2459 char delimiter[] = " \n\t";
2460 char tmp_buf[128];
2461 char *tmp, *param;
2462 long val;
2463
2464 count = min(count, sizeof(tmp_buf));
2465 memcpy(tmp_buf, buf, count);
2466 tmp_buf[count - 1] = '\0';
2467 tmp = tmp_buf;
2468
2469 tmp = skip_spaces(tmp);
2470 while ((param = strsep(&tmp, delimiter))) {
2471 if (!strlen(param)) {
2472 tmp = skip_spaces(tmp);
2473 continue;
2474 }
2475 ret = kstrtol(param, 0, &val);
2476 if (ret)
2477 return -EINVAL;
2478 num_params++;
2479 if (num_params > 1)
2480 return -EINVAL;
2481 }
2482
2483 if (num_params != 1)
2484 return -EINVAL;
2485
2486 policy_attr =
2487 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2488
2489 ret = amdgpu_pm_get_access(adev);
2490 if (ret < 0)
2491 return ret;
2492
2493 ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
2494
2495 amdgpu_pm_put_access(adev);
2496
2497 if (ret)
2498 return ret;
2499
2500 return count;
2501 }
2502
2503 #define AMDGPU_PM_POLICY_ATTR(_name, _id) \
2504 static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \
2505 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
2506 amdgpu_set_pm_policy_attr), \
2507 .id = PP_PM_POLICY_##_id, \
2508 };
2509
2510 #define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
2511
2512 AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
2513 AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
2514
2515 static struct attribute *pm_policy_attrs[] = {
2516 &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
2517 &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
2518 NULL
2519 };
2520
2521 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2522 struct attribute *attr, int n)
2523 {
2524 struct device *dev = kobj_to_dev(kobj);
2525 struct drm_device *ddev = dev_get_drvdata(dev);
2526 struct amdgpu_device *adev = drm_to_adev(ddev);
2527 struct amdgpu_pm_policy_attr *policy_attr;
2528
2529 policy_attr =
2530 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2531
2532 if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2533 -ENOENT)
2534 return 0;
2535
2536 return attr->mode;
2537 }
2538
2539 const struct attribute_group amdgpu_pm_policy_attr_group = {
2540 .name = "pm_policy",
2541 .attrs = pm_policy_attrs,
2542 .is_visible = amdgpu_pm_policy_attr_visible,
2543 };
2544
2545 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2546 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2547 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2548 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2549 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2550 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2551 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
2552 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2553 .attr_update = pp_dpm_clk_default_attr_update),
2554 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2555 .attr_update = pp_dpm_clk_default_attr_update),
2556 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2557 .attr_update = pp_dpm_clk_default_attr_update),
2558 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2559 .attr_update = pp_dpm_clk_default_attr_update),
2560 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2561 .attr_update = pp_dpm_clk_default_attr_update),
2562 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2563 .attr_update = pp_dpm_clk_default_attr_update),
2564 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2565 .attr_update = pp_dpm_clk_default_attr_update),
2566 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2567 .attr_update = pp_dpm_clk_default_attr_update),
2568 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2569 .attr_update = pp_dpm_dcefclk_attr_update),
2570 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2571 .attr_update = pp_dpm_clk_default_attr_update),
2572 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2573 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2574 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2575 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
2576 .attr_update = pp_od_clk_voltage_attr_update),
2577 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2578 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2579 AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2580 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2581 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2582 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2583 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2584 AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2585 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2586 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
2587 .attr_update = ss_power_attr_update),
2588 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
2589 .attr_update = ss_power_attr_update),
2590 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
2591 .attr_update = ss_bias_attr_update),
2592 AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
2593 .attr_update = amdgpu_pm_metrics_attr_update),
2594 };
2595
2596 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2597 uint32_t mask, enum amdgpu_device_attr_states *states)
2598 {
2599 struct device_attribute *dev_attr = &attr->dev_attr;
2600 enum amdgpu_device_attr_id attr_id = attr->attr_id;
2601 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2602
2603 if (!(attr->flags & mask)) {
2604 *states = ATTR_STATE_UNSUPPORTED;
2605 return 0;
2606 }
2607
2608 if (DEVICE_ATTR_IS(mem_busy_percent)) {
2609 if ((adev->flags & AMD_IS_APU &&
2610 gc_ver != IP_VERSION(9, 4, 3)) ||
2611 gc_ver == IP_VERSION(9, 0, 1))
2612 *states = ATTR_STATE_UNSUPPORTED;
2613 } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
2614 if (!(gc_ver == IP_VERSION(9, 3, 0) ||
2615 gc_ver == IP_VERSION(10, 3, 1) ||
2616 gc_ver == IP_VERSION(10, 3, 3) ||
2617 gc_ver == IP_VERSION(10, 3, 6) ||
2618 gc_ver == IP_VERSION(10, 3, 7) ||
2619 gc_ver == IP_VERSION(11, 0, 0) ||
2620 gc_ver == IP_VERSION(11, 0, 1) ||
2621 gc_ver == IP_VERSION(11, 0, 2) ||
2622 gc_ver == IP_VERSION(11, 0, 3) ||
2623 gc_ver == IP_VERSION(11, 0, 4) ||
2624 gc_ver == IP_VERSION(11, 5, 0) ||
2625 gc_ver == IP_VERSION(11, 5, 1) ||
2626 gc_ver == IP_VERSION(11, 5, 2) ||
2627 gc_ver == IP_VERSION(11, 5, 3) ||
2628 gc_ver == IP_VERSION(12, 0, 0) ||
2629 gc_ver == IP_VERSION(12, 0, 1)))
2630 *states = ATTR_STATE_UNSUPPORTED;
2631 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2632 /* PCIe Perf counters won't work on APU nodes */
2633 if (adev->flags & AMD_IS_APU ||
2634 !adev->asic_funcs->get_pcie_usage)
2635 *states = ATTR_STATE_UNSUPPORTED;
2636 } else if (DEVICE_ATTR_IS(unique_id)) {
2637 switch (gc_ver) {
2638 case IP_VERSION(9, 0, 1):
2639 case IP_VERSION(9, 4, 0):
2640 case IP_VERSION(9, 4, 1):
2641 case IP_VERSION(9, 4, 2):
2642 case IP_VERSION(9, 4, 3):
2643 case IP_VERSION(9, 4, 4):
2644 case IP_VERSION(9, 5, 0):
2645 case IP_VERSION(10, 3, 0):
2646 case IP_VERSION(11, 0, 0):
2647 case IP_VERSION(11, 0, 1):
2648 case IP_VERSION(11, 0, 2):
2649 case IP_VERSION(11, 0, 3):
2650 case IP_VERSION(12, 0, 0):
2651 case IP_VERSION(12, 0, 1):
2652 case IP_VERSION(12, 1, 0):
2653 *states = ATTR_STATE_SUPPORTED;
2654 break;
2655 default:
2656 *states = ATTR_STATE_UNSUPPORTED;
2657 }
2658 } else if (DEVICE_ATTR_IS(pp_features)) {
2659 if ((adev->flags & AMD_IS_APU &&
2660 gc_ver != IP_VERSION(9, 4, 3)) ||
2661 gc_ver < IP_VERSION(9, 0, 0))
2662 *states = ATTR_STATE_UNSUPPORTED;
2663 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2664 if (gc_ver < IP_VERSION(9, 1, 0))
2665 *states = ATTR_STATE_UNSUPPORTED;
2666 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2667 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2668 *states = ATTR_STATE_UNSUPPORTED;
2669 else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2670 gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2671 *states = ATTR_STATE_UNSUPPORTED;
2672 } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2673 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2674 *states = ATTR_STATE_UNSUPPORTED;
2675 } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2676 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2677 *states = ATTR_STATE_UNSUPPORTED;
2678 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2679 u32 limit;
2680
2681 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2682 -EOPNOTSUPP)
2683 *states = ATTR_STATE_UNSUPPORTED;
2684 } else if (DEVICE_ATTR_IS(pp_table)) {
2685 int ret;
2686 char *tmp = NULL;
2687
2688 ret = amdgpu_dpm_get_pp_table(adev, &tmp);
2689 if (ret == -EOPNOTSUPP || !tmp)
2690 *states = ATTR_STATE_UNSUPPORTED;
2691 else
2692 *states = ATTR_STATE_SUPPORTED;
2693 }
2694
2695 switch (gc_ver) {
2696 case IP_VERSION(10, 3, 0):
2697 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2698 amdgpu_sriov_vf(adev)) {
2699 dev_attr->attr.mode &= ~0222;
2700 dev_attr->store = NULL;
2701 }
2702 break;
2703 default:
2704 break;
2705 }
2706
2707 return 0;
2708 }
2709
2710
2711 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2712 struct amdgpu_device_attr *attr,
2713 uint32_t mask, struct list_head *attr_list)
2714 {
2715 int ret = 0;
2716 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2717 struct amdgpu_device_attr_entry *attr_entry;
2718 struct device_attribute *dev_attr;
2719 const char *name;
2720
2721 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2722 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2723
2724 if (!attr)
2725 return -EINVAL;
2726
2727 dev_attr = &attr->dev_attr;
2728 name = dev_attr->attr.name;
2729
2730 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2731
2732 ret = attr_update(adev, attr, mask, &attr_states);
2733 if (ret) {
2734 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2735 name, ret);
2736 return ret;
2737 }
2738
2739 if (attr_states == ATTR_STATE_UNSUPPORTED)
2740 return 0;
2741
2742 ret = device_create_file(adev->dev, dev_attr);
2743 if (ret) {
2744 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2745 name, ret);
2746 }
2747
2748 attr_entry = kmalloc_obj(*attr_entry);
2749 if (!attr_entry)
2750 return -ENOMEM;
2751
2752 attr_entry->attr = attr;
2753 INIT_LIST_HEAD(&attr_entry->entry);
2754
2755 list_add_tail(&attr_entry->entry, attr_list);
2756
2757 return ret;
2758 }
2759
2760 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2761 {
2762 struct device_attribute *dev_attr = &attr->dev_attr;
2763
2764 device_remove_file(adev->dev, dev_attr);
2765 }
2766
2767 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2768 struct list_head *attr_list);
2769
2770 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2771 struct amdgpu_device_attr *attrs,
2772 uint32_t counts,
2773 uint32_t mask,
2774 struct list_head *attr_list)
2775 {
2776 int ret = 0;
2777 uint32_t i = 0;
2778
2779 for (i = 0; i < counts; i++) {
2780 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2781 if (ret)
2782 goto failed;
2783 }
2784
2785 return 0;
2786
2787 failed:
2788 amdgpu_device_attr_remove_groups(adev, attr_list);
2789
2790 return ret;
2791 }
2792
2793 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2794 struct list_head *attr_list)
2795 {
2796 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2797
2798 if (list_empty(attr_list))
2799 return;
2800
2801 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2802 amdgpu_device_attr_remove(adev, entry->attr);
2803 list_del(&entry->entry);
2804 kfree(entry);
2805 }
2806 }
2807
2808 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2809 struct device_attribute *attr,
2810 char *buf)
2811 {
2812 struct amdgpu_device *adev = dev_get_drvdata(dev);
2813 int channel = to_sensor_dev_attr(attr)->index;
2814 int r, temp = 0;
2815
2816 if (channel >= PP_TEMP_MAX)
2817 return -EINVAL;
2818
2819 switch (channel) {
2820 case PP_TEMP_JUNCTION:
2821 /* get current junction temperature */
2822 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2823 (void *)&temp);
2824 break;
2825 case PP_TEMP_EDGE:
2826 /* get current edge temperature */
2827 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2828 (void *)&temp);
2829 break;
2830 case PP_TEMP_MEM:
2831 /* get current memory temperature */
2832 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2833 (void *)&temp);
2834 break;
2835 default:
2836 r = -EINVAL;
2837 break;
2838 }
2839
2840 if (r)
2841 return r;
2842
2843 return sysfs_emit(buf, "%d\n", temp);
2844 }
2845
2846 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2847 struct device_attribute *attr,
2848 char *buf)
2849 {
2850 struct amdgpu_device *adev = dev_get_drvdata(dev);
2851 int hyst = to_sensor_dev_attr(attr)->index;
2852 int temp;
2853
2854 if (hyst)
2855 temp = adev->pm.dpm.thermal.min_temp;
2856 else
2857 temp = adev->pm.dpm.thermal.max_temp;
2858
2859 return sysfs_emit(buf, "%d\n", temp);
2860 }
2861
2862 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2863 struct device_attribute *attr,
2864 char *buf)
2865 {
2866 struct amdgpu_device *adev = dev_get_drvdata(dev);
2867 int hyst = to_sensor_dev_attr(attr)->index;
2868 int temp;
2869
2870 if (hyst)
2871 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2872 else
2873 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2874
2875 return sysfs_emit(buf, "%d\n", temp);
2876 }
2877
2878 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2879 struct device_attribute *attr,
2880 char *buf)
2881 {
2882 struct amdgpu_device *adev = dev_get_drvdata(dev);
2883 int hyst = to_sensor_dev_attr(attr)->index;
2884 int temp;
2885
2886 if (hyst)
2887 temp = adev->pm.dpm.thermal.min_mem_temp;
2888 else
2889 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2890
2891 return sysfs_emit(buf, "%d\n", temp);
2892 }
2893
2894 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2895 struct device_attribute *attr,
2896 char *buf)
2897 {
2898 int channel = to_sensor_dev_attr(attr)->index;
2899
2900 if (channel >= PP_TEMP_MAX)
2901 return -EINVAL;
2902
2903 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2904 }
2905
2906 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2907 struct device_attribute *attr,
2908 char *buf)
2909 {
2910 struct amdgpu_device *adev = dev_get_drvdata(dev);
2911 int channel = to_sensor_dev_attr(attr)->index;
2912 int temp = 0;
2913
2914 if (channel >= PP_TEMP_MAX)
2915 return -EINVAL;
2916
2917 switch (channel) {
2918 case PP_TEMP_JUNCTION:
2919 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2920 break;
2921 case PP_TEMP_EDGE:
2922 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2923 break;
2924 case PP_TEMP_MEM:
2925 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2926 break;
2927 }
2928
2929 return sysfs_emit(buf, "%d\n", temp);
2930 }
2931
2932 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2933 struct device_attribute *attr,
2934 char *buf)
2935 {
2936 struct amdgpu_device *adev = dev_get_drvdata(dev);
2937 u32 pwm_mode = 0;
2938 int ret;
2939
2940 ret = amdgpu_pm_get_access_if_active(adev);
2941 if (ret)
2942 return ret;
2943
2944 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2945
2946 amdgpu_pm_put_access(adev);
2947
2948 if (ret)
2949 return -EINVAL;
2950
2951 return sysfs_emit(buf, "%u\n", pwm_mode);
2952 }
2953
2954 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2955 struct device_attribute *attr,
2956 const char *buf,
2957 size_t count)
2958 {
2959 struct amdgpu_device *adev = dev_get_drvdata(dev);
2960 int err, ret;
2961 u32 pwm_mode;
2962 int value;
2963
2964 err = kstrtoint(buf, 10, &value);
2965 if (err)
2966 return err;
2967
2968 if (value == 0)
2969 pwm_mode = AMD_FAN_CTRL_NONE;
2970 else if (value == 1)
2971 pwm_mode = AMD_FAN_CTRL_MANUAL;
2972 else if (value == 2)
2973 pwm_mode = AMD_FAN_CTRL_AUTO;
2974 else
2975 return -EINVAL;
2976
2977 ret = amdgpu_pm_get_access(adev);
2978 if (ret < 0)
2979 return ret;
2980
2981 ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2982
2983 amdgpu_pm_put_access(adev);
2984
2985 if (ret)
2986 return -EINVAL;
2987
2988 return count;
2989 }
2990
2991 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2992 struct device_attribute *attr,
2993 char *buf)
2994 {
2995 return sysfs_emit(buf, "%i\n", 0);
2996 }
2997
2998 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2999 struct device_attribute *attr,
3000 char *buf)
3001 {
3002 return sysfs_emit(buf, "%i\n", 255);
3003 }
3004
3005 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
3006 struct device_attribute *attr,
3007 const char *buf, size_t count)
3008 {
3009 struct amdgpu_device *adev = dev_get_drvdata(dev);
3010 int err;
3011 u32 value;
3012 u32 pwm_mode;
3013
3014 err = kstrtou32(buf, 10, &value);
3015 if (err)
3016 return err;
3017
3018 err = amdgpu_pm_get_access(adev);
3019 if (err < 0)
3020 return err;
3021
3022 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3023 if (err)
3024 goto out;
3025
3026 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3027 pr_info("manual fan speed control should be enabled first\n");
3028 err = -EINVAL;
3029 goto out;
3030 }
3031
3032 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
3033
3034 out:
3035 amdgpu_pm_put_access(adev);
3036
3037 if (err)
3038 return err;
3039
3040 return count;
3041 }
3042
3043 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
3044 struct device_attribute *attr,
3045 char *buf)
3046 {
3047 struct amdgpu_device *adev = dev_get_drvdata(dev);
3048 int err;
3049 u32 speed = 0;
3050
3051 err = amdgpu_pm_get_access_if_active(adev);
3052 if (err)
3053 return err;
3054
3055 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
3056
3057 amdgpu_pm_put_access(adev);
3058
3059 if (err)
3060 return err;
3061
3062 return sysfs_emit(buf, "%i\n", speed);
3063 }
3064
3065 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
3066 struct device_attribute *attr,
3067 char *buf)
3068 {
3069 struct amdgpu_device *adev = dev_get_drvdata(dev);
3070 int err;
3071 u32 speed = 0;
3072
3073 err = amdgpu_pm_get_access_if_active(adev);
3074 if (err)
3075 return err;
3076
3077 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
3078
3079 amdgpu_pm_put_access(adev);
3080
3081 if (err)
3082 return err;
3083
3084 return sysfs_emit(buf, "%i\n", speed);
3085 }
3086
3087 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
3088 struct device_attribute *attr,
3089 char *buf)
3090 {
3091 struct amdgpu_device *adev = dev_get_drvdata(dev);
3092 u32 min_rpm = 0;
3093 int r;
3094
3095 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
3096 (void *)&min_rpm);
3097
3098 if (r)
3099 return r;
3100
3101 return sysfs_emit(buf, "%d\n", min_rpm);
3102 }
3103
3104 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
3105 struct device_attribute *attr,
3106 char *buf)
3107 {
3108 struct amdgpu_device *adev = dev_get_drvdata(dev);
3109 u32 max_rpm = 0;
3110 int r;
3111
3112 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
3113 (void *)&max_rpm);
3114
3115 if (r)
3116 return r;
3117
3118 return sysfs_emit(buf, "%d\n", max_rpm);
3119 }
3120
3121 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
3122 struct device_attribute *attr,
3123 char *buf)
3124 {
3125 struct amdgpu_device *adev = dev_get_drvdata(dev);
3126 int err;
3127 u32 rpm = 0;
3128
3129 err = amdgpu_pm_get_access_if_active(adev);
3130 if (err)
3131 return err;
3132
3133 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
3134
3135 amdgpu_pm_put_access(adev);
3136
3137 if (err)
3138 return err;
3139
3140 return sysfs_emit(buf, "%i\n", rpm);
3141 }
3142
3143 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
3144 struct device_attribute *attr,
3145 const char *buf, size_t count)
3146 {
3147 struct amdgpu_device *adev = dev_get_drvdata(dev);
3148 int err;
3149 u32 value;
3150 u32 pwm_mode;
3151
3152 err = kstrtou32(buf, 10, &value);
3153 if (err)
3154 return err;
3155
3156 err = amdgpu_pm_get_access(adev);
3157 if (err < 0)
3158 return err;
3159
3160 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3161 if (err)
3162 goto out;
3163
3164 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3165 err = -ENODATA;
3166 goto out;
3167 }
3168
3169 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
3170
3171 out:
3172 amdgpu_pm_put_access(adev);
3173
3174 if (err)
3175 return err;
3176
3177 return count;
3178 }
3179
3180 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
3181 struct device_attribute *attr,
3182 char *buf)
3183 {
3184 struct amdgpu_device *adev = dev_get_drvdata(dev);
3185 u32 pwm_mode = 0;
3186 int ret;
3187
3188 ret = amdgpu_pm_get_access_if_active(adev);
3189 if (ret)
3190 return ret;
3191
3192 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3193
3194 amdgpu_pm_put_access(adev);
3195
3196 if (ret)
3197 return -EINVAL;
3198
3199 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3200 }
3201
3202 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3203 struct device_attribute *attr,
3204 const char *buf,
3205 size_t count)
3206 {
3207 struct amdgpu_device *adev = dev_get_drvdata(dev);
3208 int err;
3209 int value;
3210 u32 pwm_mode;
3211
3212 err = kstrtoint(buf, 10, &value);
3213 if (err)
3214 return err;
3215
3216 if (value == 0)
3217 pwm_mode = AMD_FAN_CTRL_AUTO;
3218 else if (value == 1)
3219 pwm_mode = AMD_FAN_CTRL_MANUAL;
3220 else
3221 return -EINVAL;
3222
3223 err = amdgpu_pm_get_access(adev);
3224 if (err < 0)
3225 return err;
3226
3227 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3228
3229 amdgpu_pm_put_access(adev);
3230
3231 if (err)
3232 return -EINVAL;
3233
3234 return count;
3235 }
3236
3237 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3238 struct device_attribute *attr,
3239 char *buf)
3240 {
3241 struct amdgpu_device *adev = dev_get_drvdata(dev);
3242 u32 vddgfx;
3243 int r;
3244
3245 /* get the voltage */
3246 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3247 (void *)&vddgfx);
3248 if (r)
3249 return r;
3250
3251 return sysfs_emit(buf, "%d\n", vddgfx);
3252 }
3253
3254 static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
3255 struct device_attribute *attr,
3256 char *buf)
3257 {
3258 struct amdgpu_device *adev = dev_get_drvdata(dev);
3259 u32 vddboard;
3260 int r;
3261
3262 /* get the voltage */
3263 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3264 (void *)&vddboard);
3265 if (r)
3266 return r;
3267
3268 return sysfs_emit(buf, "%d\n", vddboard);
3269 }
3270
3271 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
3272 struct device_attribute *attr,
3273 char *buf)
3274 {
3275 return sysfs_emit(buf, "vddgfx\n");
3276 }
3277
3278 static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
3279 struct device_attribute *attr,
3280 char *buf)
3281 {
3282 return sysfs_emit(buf, "vddboard\n");
3283 }
3284 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3285 struct device_attribute *attr,
3286 char *buf)
3287 {
3288 struct amdgpu_device *adev = dev_get_drvdata(dev);
3289 u32 vddnb;
3290 int r;
3291
3292 /* only APUs have vddnb */
3293 if (!(adev->flags & AMD_IS_APU))
3294 return -EINVAL;
3295
3296 /* get the voltage */
3297 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3298 (void *)&vddnb);
3299 if (r)
3300 return r;
3301
3302 return sysfs_emit(buf, "%d\n", vddnb);
3303 }
3304
3305 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3306 struct device_attribute *attr,
3307 char *buf)
3308 {
3309 return sysfs_emit(buf, "vddnb\n");
3310 }
3311
3312 static int amdgpu_hwmon_get_power(struct device *dev,
3313 enum amd_pp_sensors sensor)
3314 {
3315 struct amdgpu_device *adev = dev_get_drvdata(dev);
3316 unsigned int uw;
3317 u32 query = 0;
3318 int r;
3319
3320 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
3321 if (r)
3322 return r;
3323
3324 /* the sensor reports integer watts in the upper bits and a sub-watt remainder in the low byte; convert to microwatts */
3325 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3326
3327 return uw;
3328 }
3329
3330 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3331 struct device_attribute *attr,
3332 char *buf)
3333 {
3334 ssize_t val;
3335
3336 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3337 if (val < 0)
3338 return val;
3339
3340 return sysfs_emit(buf, "%zd\n", val);
3341 }
3342
3343 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3344 struct device_attribute *attr,
3345 char *buf)
3346 {
3347 ssize_t val;
3348
3349 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3350 if (val < 0)
3351 return val;
3352
3353 return sysfs_emit(buf, "%zd\n", val);
3354 }
3355
3356 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3357 struct device_attribute *attr,
3358 char *buf,
3359 enum pp_power_limit_level pp_limit_level)
3360 {
3361 struct amdgpu_device *adev = dev_get_drvdata(dev);
3362 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3363 uint32_t limit;
3364 ssize_t size;
3365 int r;
3366
3367 r = amdgpu_pm_get_access_if_active(adev);
3368 if (r)
3369 return r;
3370
3371 r = amdgpu_dpm_get_power_limit(adev, &limit,
3372 pp_limit_level, power_type);
3373
3374 if (!r)
3375 size = sysfs_emit(buf, "%u\n", limit * 1000000);
3376 else
3377 size = sysfs_emit(buf, "\n");
3378
3379 amdgpu_pm_put_access(adev);
3380
3381 return size;
3382 }
3383
3384 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3385 struct device_attribute *attr,
3386 char *buf)
3387 {
3388 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3389 }
3390
3391 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3392 struct device_attribute *attr,
3393 char *buf)
3394 {
3395 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3396
3397 }
3398
3399 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3400 struct device_attribute *attr,
3401 char *buf)
3402 {
3403 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3404
3405 }
3406
3407 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3408 struct device_attribute *attr,
3409 char *buf)
3410 {
3411 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3412
3413 }
3414
3415 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3416 struct device_attribute *attr,
3417 char *buf)
3418 {
3419 struct amdgpu_device *adev = dev_get_drvdata(dev);
3420 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3421
3422 if (gc_ver == IP_VERSION(10, 3, 1))
3423 return sysfs_emit(buf, "%s\n",
3424 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3425 "fastPPT" : "slowPPT");
3426 else
3427 return sysfs_emit(buf, "%s\n",
3428 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3429 "PPT1" : "PPT");
3430 }
3431
3432 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3433 struct device_attribute *attr,
3434 const char *buf,
3435 size_t count)
3436 {
3437 struct amdgpu_device *adev = dev_get_drvdata(dev);
3438 int limit_type = to_sensor_dev_attr(attr)->index;
3439 int err;
3440 u32 value;
3441
3442 err = kstrtou32(buf, 10, &value);
3443 if (err)
3444 return err;
3445
3446 value = value / 1000000; /* convert to Watt */
3447
3448 err = amdgpu_pm_get_access(adev);
3449 if (err < 0)
3450 return err;
3451
3452 err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
3453
3454 amdgpu_pm_put_access(adev);
3455
3456 if (err)
3457 return err;
3458
3459 return count;
3460 }
3461
3462 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3463 struct device_attribute *attr,
3464 char *buf)
3465 {
3466 struct amdgpu_device *adev = dev_get_drvdata(dev);
3467 uint32_t sclk;
3468 int r;
3469
3470 /* get the sclk */
3471 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3472 (void *)&sclk);
3473 if (r)
3474 return r;
3475
3476 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3477 }
3478
3479 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3480 struct device_attribute *attr,
3481 char *buf)
3482 {
3483 return sysfs_emit(buf, "sclk\n");
3484 }
3485
3486 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3487 struct device_attribute *attr,
3488 char *buf)
3489 {
3490 struct amdgpu_device *adev = dev_get_drvdata(dev);
3491 uint32_t mclk;
3492 int r;
3493
3494 /* get the mclk */
3495 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3496 (void *)&mclk);
3497 if (r)
3498 return r;
3499
3500 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3501 }
3502
3503 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3504 struct device_attribute *attr,
3505 char *buf)
3506 {
3507 return sysfs_emit(buf, "mclk\n");
3508 }
3509
3510 /**
3511 * DOC: hwmon
3512 *
3513 * The amdgpu driver exposes the following sensor interfaces:
3514 *
3515 * - GPU temperature (via the on-die sensor)
3516 *
3517 * - GPU voltage
3518 *
3519 * - Northbridge voltage (APUs only)
3520 *
3521 * - GPU power
3522 *
3523 * - GPU fan
3524 *
3525 * - GPU gfx/compute engine clock
3526 *
3527 * - GPU memory clock (dGPU only)
3528 *
3529 * hwmon interfaces for GPU temperature:
3530 *
3531 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3532 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3533 *
3534 * - temp[1-3]_label: temperature channel label
3535 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3536 *
3537 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3538 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3539 *
3540 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3541 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3542 *
3543 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius
3544 * - these are supported on SOC15 dGPUs only
3545 *
3546 * hwmon interfaces for GPU voltage:
3547 *
3548 * - in0_input: the voltage on the GPU in millivolts
3549 *
3550 * - in1_input: the voltage on the Northbridge in millivolts
3551 *
3552 * hwmon interfaces for GPU power:
3553 *
3554 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
3555 *
3556 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
3557 *
3558 * - power1_cap_min: minimum cap supported in microWatts
3559 *
3560 * - power1_cap_max: maximum cap supported in microWatts
3561 *
3562 * - power1_cap: selected power cap in microWatts
3563 *
3564 * hwmon interfaces for GPU fan:
3565 *
3566 * - pwm1: pulse width modulation fan level (0-255)
3567 *
3568 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3569 *
3570 * - pwm1_min: pulse width modulation fan control minimum level (0)
3571 *
3572 * - pwm1_max: pulse width modulation fan control maximum level (255)
3573 *
3574 * - fan1_min: the minimum fan speed. Unit: revolution/min (RPM)
3575 *
3576 * - fan1_max: the maximum fan speed. Unit: revolution/min (RPM)
3577 *
3578 * - fan1_input: fan speed in RPM
3579 *
3580 * - fan[1-\*]_target: Desired fan speed. Unit: revolution/min (RPM)
3581 *
3582 * - fan[1-\*]_enable: Enable or disable the sensors. 1: Enable, 0: Disable
3583 *
3584 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3585 * Doing so will cause the former to be overridden.
3586 *
3587 * hwmon interfaces for GPU clocks:
3588 *
3589 * - freq1_input: the gfx/compute clock in hertz
3590 *
3591 * - freq2_input: the memory clock in hertz
3592 *
3593 * You can use hwmon tools like sensors to view this information on your system.
3594 *
3595 */
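/*
 * Illustrative only (not part of the driver): a minimal user-space sketch of
 * consuming the hwmon files documented above. The hwmon0 index, the file
 * paths and the hwmon_read_long() helper below are assumptions for the
 * example; a real tool should locate the right hwmonX directory by matching
 * its "name" attribute ("amdgpu") or by following the PCI device's hwmon link.
 *
 *   #include <stdio.h>
 *
 *   // Read a single integer value from a hwmon attribute file.
 *   static long hwmon_read_long(const char *path)
 *   {
 *           FILE *f = fopen(path, "r");
 *           long val = -1;
 *
 *           if (f) {
 *                   if (fscanf(f, "%ld", &val) != 1)
 *                           val = -1;
 *                   fclose(f);
 *           }
 *           return val;
 *   }
 *
 *   int main(void)
 *   {
 *           // temp1_input is in millidegrees Celsius, power1_average in microWatts.
 *           long temp = hwmon_read_long("/sys/class/hwmon/hwmon0/temp1_input");
 *           long power = hwmon_read_long("/sys/class/hwmon/hwmon0/power1_average");
 *
 *           printf("edge temp: %ld mC, average power: %ld uW\n", temp, power);
 *           return 0;
 *   }
 */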
3596
3597 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3598 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3599 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3600 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3601 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3602 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3603 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3604 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3605 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3606 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3607 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3608 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3609 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3610 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3611 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3612 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3613 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3614 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3615 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3616 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3617 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3618 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3619 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3620 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3621 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3622 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3623 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3624 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3625 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
3626 static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
3627 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3628 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3629 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3630 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3631 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3632 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3633 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3634 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3635 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3636 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3637 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3638 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3639 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3640 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3641 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3642 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3643
3644 static struct attribute *hwmon_attributes[] = {
3645 &sensor_dev_attr_temp1_input.dev_attr.attr,
3646 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3647 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3648 &sensor_dev_attr_temp2_input.dev_attr.attr,
3649 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3650 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3651 &sensor_dev_attr_temp3_input.dev_attr.attr,
3652 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3653 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3654 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3655 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3656 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3657 &sensor_dev_attr_temp1_label.dev_attr.attr,
3658 &sensor_dev_attr_temp2_label.dev_attr.attr,
3659 &sensor_dev_attr_temp3_label.dev_attr.attr,
3660 &sensor_dev_attr_pwm1.dev_attr.attr,
3661 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3662 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3663 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3664 &sensor_dev_attr_fan1_input.dev_attr.attr,
3665 &sensor_dev_attr_fan1_min.dev_attr.attr,
3666 &sensor_dev_attr_fan1_max.dev_attr.attr,
3667 &sensor_dev_attr_fan1_target.dev_attr.attr,
3668 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3669 &sensor_dev_attr_in0_input.dev_attr.attr,
3670 &sensor_dev_attr_in0_label.dev_attr.attr,
3671 &sensor_dev_attr_in1_input.dev_attr.attr,
3672 &sensor_dev_attr_in1_label.dev_attr.attr,
3673 &sensor_dev_attr_in2_input.dev_attr.attr,
3674 &sensor_dev_attr_in2_label.dev_attr.attr,
3675 &sensor_dev_attr_power1_average.dev_attr.attr,
3676 &sensor_dev_attr_power1_input.dev_attr.attr,
3677 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3678 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3679 &sensor_dev_attr_power1_cap.dev_attr.attr,
3680 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3681 &sensor_dev_attr_power1_label.dev_attr.attr,
3682 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3683 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3684 &sensor_dev_attr_power2_cap.dev_attr.attr,
3685 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3686 &sensor_dev_attr_power2_label.dev_attr.attr,
3687 &sensor_dev_attr_freq1_input.dev_attr.attr,
3688 &sensor_dev_attr_freq1_label.dev_attr.attr,
3689 &sensor_dev_attr_freq2_input.dev_attr.attr,
3690 &sensor_dev_attr_freq2_label.dev_attr.attr,
3691 NULL
3692 };
3693
3694 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3695 struct attribute *attr, int index)
3696 {
3697 struct device *dev = kobj_to_dev(kobj);
3698 struct amdgpu_device *adev = dev_get_drvdata(dev);
3699 umode_t effective_mode = attr->mode;
3700 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3701 uint32_t tmp;
3702
3703 /* under pp one vf mode, management of hwmon attributes is not supported */
3704 if (amdgpu_sriov_is_pp_one_vf(adev))
3705 effective_mode &= ~S_IWUSR;
3706
3707 /* Skip fan attributes if fan is not present */
3708 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3709 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3710 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3711 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3712 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3713 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3714 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3715 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3716 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3717 return 0;
3718
3719 /* Skip fan attributes on APU */
3720 if ((adev->flags & AMD_IS_APU) &&
3721 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3722 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3723 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3724 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3725 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3726 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3727 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3728 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3729 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3730 return 0;
3731
3732 /* Skip crit temp on APU */
3733 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3734 amdgpu_is_multi_aid(adev)) &&
3735 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3736 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3737 return 0;
3738
3739 /* Skip limit attributes if DPM is not enabled */
3740 if (!adev->pm.dpm_enabled &&
3741 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3742 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3743 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3744 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3745 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3746 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3747 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3748 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3749 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3750 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3751 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3752 return 0;
3753
3754 /* mask fan attributes if we have no bindings for this asic to expose */
3755 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3756 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3757 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3758 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3759 effective_mode &= ~S_IRUGO;
3760
3761 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3762 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3763 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3764 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3765 effective_mode &= ~S_IWUSR;
3766
3767 /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
3768 if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3769 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3770 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3771 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
3772 if (adev->family == AMDGPU_FAMILY_SI ||
3773 ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
3774 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
3775 (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
3776 return 0;
3777 }
3778
3779 if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
3780 amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
3781 effective_mode |= S_IWUSR;
3782
3783 /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
3784 if (((adev->family == AMDGPU_FAMILY_SI) ||
3785 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3786 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3787 return 0;
3788
3789 /* not all products support both average and instantaneous */
3790 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3791 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
3792 (void *)&tmp) == -EOPNOTSUPP)
3793 return 0;
3794 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3795 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
3796 (void *)&tmp) == -EOPNOTSUPP)
3797 return 0;
3798
3799 /* hide max/min values if we can't both query and manage the fan */
3800 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3801 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3802 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3803 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3804 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3805 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3806 return 0;
3807
3808 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3809 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3810 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3811 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3812 return 0;
3813
3814 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3815 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
3816 amdgpu_is_multi_aid(adev)) &&
3817 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3818 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3819 return 0;
3820
3821 /* only APUs other than GC 9.4.3 have vddnb */
3822 if ((!(adev->flags & AMD_IS_APU) ||
3823 amdgpu_is_multi_aid(adev)) &&
3824 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3825 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3826 return 0;
3827
3828 /* only few boards support vddboard */
3829 if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
3830 attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
3831 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3832 (void *)&tmp) == -EOPNOTSUPP)
3833 return 0;
3834
3835 /* no mclk on APUs other than GC 9.4.3 */
3836 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3837 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3838 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3839 return 0;
3840
3841 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3842 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
3843 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3844 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3845 attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3846 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3847 attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3848 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3849 return 0;
3850
3851 /* hotspot temperature for GC 9.4.3 */
3852 if (amdgpu_is_multi_aid(adev)) {
3853 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3854 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3855 attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3856 return 0;
3857
3858 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3859 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3860 return attr->mode;
3861 }
3862
3863 /* only SOC15 dGPUs support hotspot and mem temperatures */
3864 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3865 (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3866 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3867 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3868 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3869 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3870 return 0;
3871
3872 /* only a few GPUs have fast PPT limit and power labels */
3873 if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3874 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3875 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3876 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3877 attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
3878 (amdgpu_dpm_get_power_limit(adev, &tmp,
3879 PP_PWR_LIMIT_MAX,
3880 PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
3881 return 0;
3882
3883 return effective_mode;
3884 }
3885
3886 static const struct attribute_group hwmon_attrgroup = {
3887 .attrs = hwmon_attributes,
3888 .is_visible = hwmon_attributes_visible,
3889 };
3890
3891 static const struct attribute_group *hwmon_groups[] = {
3892 &hwmon_attrgroup,
3893 NULL
3894 };
3895
3896 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3897 enum pp_clock_type od_type,
3898 char *buf)
3899 {
3900 int size = 0;
3901 int ret;
3902
3903 ret = amdgpu_pm_get_access_if_active(adev);
3904 if (ret)
3905 return ret;
3906
3907 ret = amdgpu_dpm_emit_clock_levels(adev, od_type, buf, &size);
3908 if (ret) {
3909 size = ret;
3910 goto out_pm_put;
3911 }
3912 if (size == 0)
3913 size = sysfs_emit(buf, "\n");
3914
3915 out_pm_put:
3916 amdgpu_pm_put_access(adev);
3917
3918 return size;
3919 }
3920
3921 static int parse_input_od_command_lines(const char *buf,
3922 size_t count,
3923 u32 *type,
3924 long *params,
3925 uint32_t *num_of_params)
3926 {
3927 const char delimiter[3] = {' ', '\n', '\0'};
3928 uint32_t parameter_size = 0;
3929 char buf_cpy[128] = {0};
3930 char *tmp_str, *sub_str;
3931 int ret;
3932
3933 if (count > sizeof(buf_cpy) - 1)
3934 return -EINVAL;
3935
3936 memcpy(buf_cpy, buf, count);
3937 tmp_str = buf_cpy;
3938
3939 /* skip leading spaces */
3940 while (isspace(*tmp_str))
3941 tmp_str++;
3942
3943 switch (*tmp_str) {
3944 case 'c':
3945 *type = PP_OD_COMMIT_DPM_TABLE;
3946 return 0;
3947 case 'r':
3948 params[parameter_size] = *type;
3949 *num_of_params = 1;
3950 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3951 return 0;
3952 default:
3953 break;
3954 }
3955
3956 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3957 if (strlen(sub_str) == 0)
3958 continue;
3959
3960 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3961 if (ret)
3962 return -EINVAL;
3963 parameter_size++;
3964
3965 if (!tmp_str)
3966 break;
3967
3968 while (isspace(*tmp_str))
3969 tmp_str++;
3970 }
3971
3972 *num_of_params = parameter_size;
3973
3974 return 0;
3975 }
3976
3977 static int
3978 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3979 enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3980 const char *in_buf,
3981 size_t count)
3982 {
3983 uint32_t parameter_size = 0;
3984 long parameter[64];
3985 int ret;
3986
3987 ret = parse_input_od_command_lines(in_buf,
3988 count,
3989 &cmd_type,
3990 parameter,
3991 &parameter_size);
3992 if (ret)
3993 return ret;
3994
3995 ret = amdgpu_pm_get_access(adev);
3996 if (ret < 0)
3997 return ret;
3998
3999 ret = amdgpu_dpm_odn_edit_dpm_table(adev,
4000 cmd_type,
4001 parameter,
4002 parameter_size);
4003 if (ret)
4004 goto err_out;
4005
4006 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
4007 ret = amdgpu_dpm_dispatch_task(adev,
4008 AMD_PP_TASK_READJUST_POWER_STATE,
4009 NULL);
4010 if (ret)
4011 goto err_out;
4012 }
4013
4014 amdgpu_pm_put_access(adev);
4015
4016 return count;
4017
4018 err_out:
4019 amdgpu_pm_put_access(adev);
4020
4021 return ret;
4022 }
4023
4024 /**
4025 * DOC: fan_curve
4026 *
4027 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
4028 * control curve line.
4029 *
4030 * Reading back the file shows the current settings (temperature in degrees
4031 * Celsius and fan speed in PWM) applied to every anchor point of the curve
4032 * line, and their permitted ranges if changeable.
4033 *
4034 * Writing a string in the format "anchor_point_index temperature
4035 * fan_speed_in_pwm" to the file changes the settings for that anchor
4036 * point accordingly.
4037 *
4038 * When you have finished the editing, write "c" (commit) to the file to commit
4039 * your changes.
4040 *
4041 * If you want to reset to the default values, write "r" (reset) to the file
4042 * to reset them.
4043 *
4044 * There are two fan control modes supported: auto and manual. In auto mode,
4045 * the PMFW handles fan speed control (how fan speed reacts to ASIC
4046 * temperature), while in manual mode users can set their own fan curve line
4047 * as described here. Normally the ASIC boots up in auto mode. Any setting
4048 * made via this interface will switch the fan control to manual mode
4049 * implicitly.
4050 */
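/*
 * Illustrative only (not part of the driver): a user-space sketch of the
 * edit/commit sequence described above. The card0 path below is an
 * assumption for the example; the same write-then-commit pattern applies to
 * the other gpu_od fan_ctrl files documented further down.
 *
 *   #include <stdio.h>
 *
 *   // Write one command string to a gpu_od attribute file.
 *   static int od_write(const char *path, const char *cmd)
 *   {
 *           FILE *f = fopen(path, "w");
 *
 *           if (!f)
 *                   return -1;
 *           fprintf(f, "%s", cmd);
 *           return fclose(f);
 *   }
 *
 *   int main(void)
 *   {
 *           const char *path =
 *                   "/sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_curve";
 *
 *           // "anchor_point_index temperature fan_speed_in_pwm"
 *           od_write(path, "1 65 140");
 *           // Commit the pending change ("r" instead would restore defaults).
 *           od_write(path, "c");
 *           return 0;
 *   }
 */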
4051 static ssize_t fan_curve_show(struct kobject *kobj,
4052 struct kobj_attribute *attr,
4053 char *buf)
4054 {
4055 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4056 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4057
4058 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
4059 }
4060
4061 static ssize_t fan_curve_store(struct kobject *kobj,
4062 struct kobj_attribute *attr,
4063 const char *buf,
4064 size_t count)
4065 {
4066 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4067 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4068
4069 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4070 PP_OD_EDIT_FAN_CURVE,
4071 buf,
4072 count);
4073 }
4074
4075 static umode_t fan_curve_visible(struct amdgpu_device *adev)
4076 {
4077 umode_t umode = 0000;
4078
4079 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
4080 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4081
4082 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
4083 umode |= S_IWUSR;
4084
4085 return umode;
4086 }
4087
4088 /**
4089 * DOC: acoustic_limit_rpm_threshold
4090 *
4091 * The amdgpu driver provides a sysfs API for checking and adjusting the
4092 * acoustic limit in RPM for fan control.
4093 *
4094 * Reading back the file shows the current setting and the permitted
4095 * ranges if changeable.
4096 *
4097 * Writing an integer to the file changes the setting accordingly.
4098 *
4099 * When you have finished the editing, write "c" (commit) to the file to commit
4100 * your changes.
4101 *
4102 * If you want to reset to the default value, write "r" (reset) to the file to
4103 * reset it.
4104 *
4105 * This setting works only under auto fan control mode. It adjusts the PMFW's
4106 * limit on the maximum speed in RPM the fan can spin. Setting it via this
4107 * interface will switch the fan control to auto mode implicitly.
4108 */
4109 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
4110 struct kobj_attribute *attr,
4111 char *buf)
4112 {
4113 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4114 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4115
4116 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
4117 }
4118
4119 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
4120 struct kobj_attribute *attr,
4121 const char *buf,
4122 size_t count)
4123 {
4124 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4125 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4126
4127 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4128 PP_OD_EDIT_ACOUSTIC_LIMIT,
4129 buf,
4130 count);
4131 }
4132
4133 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
4134 {
4135 umode_t umode = 0000;
4136
4137 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
4138 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4139
4140 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
4141 umode |= S_IWUSR;
4142
4143 return umode;
4144 }
4145
4146 /**
4147 * DOC: acoustic_target_rpm_threshold
4148 *
4149 * The amdgpu driver provides a sysfs API for checking and adjusting the
4150 * acoustic target in RPM for fan control.
4151 *
4152 * Reading back the file shows the current setting and the permitted
4153 * ranges if changeable.
4154 *
4155 * Writing an integer to the file changes the setting accordingly.
4156 *
4157 * When you have finished the editing, write "c" (commit) to the file to commit
4158 * your changes.
4159 *
4160 * If you want to reset to the default value, write "r" (reset) to the file to
4161 * reset it.
4162 *
4163 * This setting works only under auto fan control mode. It can co-exist with
4164 * other settings which also work under auto mode. It adjusts the PMFW's
4165 * limit on the maximum speed in RPM the fan can spin when the ASIC
4166 * temperature is not greater than the target temperature. Setting it via this
4167 * interface will switch the fan control to auto mode implicitly.
4168 */
4169 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
4170 struct kobj_attribute *attr,
4171 char *buf)
4172 {
4173 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4174 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4175
4176 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
4177 }
4178
4179 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
4180 struct kobj_attribute *attr,
4181 const char *buf,
4182 size_t count)
4183 {
4184 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4185 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4186
4187 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4188 PP_OD_EDIT_ACOUSTIC_TARGET,
4189 buf,
4190 count);
4191 }
4192
4193 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
4194 {
4195 umode_t umode = 0000;
4196
4197 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
4198 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4199
4200 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
4201 umode |= S_IWUSR;
4202
4203 return umode;
4204 }
4205
4206 /**
4207 * DOC: fan_target_temperature
4208 *
4209 * The amdgpu driver provides a sysfs API for checking and adjusting the
4210 * target temperature in degrees Celsius for fan control.
4211 *
4212 * Reading back the file shows the current setting and the permitted
4213 * ranges if changeable.
4214 *
4215 * Writing an integer to the file changes the setting accordingly.
4216 *
4217 * When you have finished the editing, write "c" (commit) to the file to commit
4218 * your changes.
4219 *
4220 * If you want to reset to the default value, write "r" (reset) to the file to
4221 * reset it.
4222 *
4223 * This setting works only under auto fan control mode. It can co-exist with
4224 * other settings which also work under auto mode. Paired with the
4225 * acoustic_target_rpm_threshold setting, it defines the maximum speed in
4226 * RPM the fan can spin when the ASIC temperature is not greater than the
4227 * target temperature. Setting it via this interface will switch the fan
4228 * control to auto mode implicitly.
4229 */
4230 static ssize_t fan_target_temperature_show(struct kobject *kobj,
4231 struct kobj_attribute *attr,
4232 char *buf)
4233 {
4234 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4235 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4236
4237 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4238 }
4239
4240 static ssize_t fan_target_temperature_store(struct kobject *kobj,
4241 struct kobj_attribute *attr,
4242 const char *buf,
4243 size_t count)
4244 {
4245 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4246 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4247
4248 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4249 PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4250 buf,
4251 count);
4252 }
4253
4254 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4255 {
4256 umode_t umode = 0000;
4257
4258 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4259 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4260
4261 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4262 umode |= S_IWUSR;
4263
4264 return umode;
4265 }
4266
4267 /**
4268 * DOC: fan_minimum_pwm
4269 *
4270 * The amdgpu driver provides a sysfs API for checking and adjusting the
4271 * minimum fan speed in PWM.
4272 *
4273 * Reading back the file shows the current setting and the permitted
4274 * ranges if changeable.
4275 *
4276 * Writing an integer to the file changes the setting accordingly.
4277 *
4278 * When you have finished the editing, write "c" (commit) to the file to commit
4279 * your changes.
4280 *
4281 * If you want to reset to the default value, write "r" (reset) to the file to
4282 * reset it.
4283 *
4284 * This setting works only under auto fan control mode. It can co-exist with
4285 * other settings which also work under auto mode. It adjusts the PMFW's
4286 * limit on the minimum fan speed in PWM the fan should spin at. Setting it
4287 * via this interface will switch the fan control to auto mode implicitly.
4288 */
4289 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4290 struct kobj_attribute *attr,
4291 char *buf)
4292 {
4293 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4294 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4295
4296 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4297 }
4298
4299 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4300 struct kobj_attribute *attr,
4301 const char *buf,
4302 size_t count)
4303 {
4304 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4305 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4306
4307 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4308 PP_OD_EDIT_FAN_MINIMUM_PWM,
4309 buf,
4310 count);
4311 }
4312
4313 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4314 {
4315 umode_t umode = 0000;
4316
4317 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4318 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4319
4320 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4321 umode |= S_IWUSR;
4322
4323 return umode;
4324 }
4325
4326 /**
4327 * DOC: fan_zero_rpm_enable
4328 *
4329 * The amdgpu driver provides a sysfs API for checking and adjusting the
4330 * zero RPM feature.
4331 *
4332 * Reading back the file shows the current setting and the permitted
4333 * ranges if changeable.
4334 *
4335 * Writing an integer to the file changes the setting accordingly.
4336 *
4337 * When you have finished the editing, write "c" (commit) to the file to commit
4338 * your changes.
4339 *
4340 * If you want to reset to the default value, write "r" (reset) to the file to
4341 * reset them.
4342 */
4343 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4344 struct kobj_attribute *attr,
4345 char *buf)
4346 {
4347 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4348 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4349
4350 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4351 }
4352
4353 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4354 struct kobj_attribute *attr,
4355 const char *buf,
4356 size_t count)
4357 {
4358 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4359 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4360
4361 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4362 PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4363 buf,
4364 count);
4365 }
4366
4367 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4368 {
4369 umode_t umode = 0000;
4370
4371 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4372 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4373
4374 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4375 umode |= S_IWUSR;
4376
4377 return umode;
4378 }
4379
4380 /**
4381 * DOC: fan_zero_rpm_stop_temperature
4382 *
4383 * The amdgpu driver provides a sysfs API for checking and adjusting the
4384 * zero RPM stop temperature feature.
4385 *
4386 * Reading back the file shows the current setting and the permitted
4387 * ranges if changeable.
4388 *
4389 * Writing an integer to the file changes the setting accordingly.
4390 *
4391 * When you have finished the editing, write "c" (commit) to the file to commit
4392 * your changes.
4393 *
4394 * If you want to reset to the default value, write "r" (reset) to the file to
4395 * reset them.
4396 *
4397 * This setting works only if the Zero RPM setting is enabled. It adjusts the
4398 * temperature below which the fan can stop.
4399 */
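/*
 * Illustrative only (not part of the driver): a user-space sketch that pairs
 * fan_zero_rpm_enable with fan_zero_rpm_stop_temperature, since the stop
 * temperature only takes effect while zero RPM is enabled. The card0 path
 * and the od_write() helper are assumptions for the example.
 *
 *   #include <stdio.h>
 *
 *   // Write one command string to a gpu_od attribute file.
 *   static void od_write(const char *path, const char *cmd)
 *   {
 *           FILE *f = fopen(path, "w");
 *
 *           if (f) {
 *                   fprintf(f, "%s", cmd);
 *                   fclose(f);
 *           }
 *   }
 *
 *   int main(void)
 *   {
 *           const char *dir = "/sys/class/drm/card0/device/gpu_od/fan_ctrl";
 *           char path[256];
 *
 *           // Enable the zero RPM feature, then commit.
 *           snprintf(path, sizeof(path), "%s/fan_zero_rpm_enable", dir);
 *           od_write(path, "1");
 *           od_write(path, "c");
 *
 *           // Allow the fan to stop below 50 degrees Celsius, then commit.
 *           snprintf(path, sizeof(path), "%s/fan_zero_rpm_stop_temperature", dir);
 *           od_write(path, "50");
 *           od_write(path, "c");
 *           return 0;
 *   }
 */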
4400 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4401 struct kobj_attribute *attr,
4402 char *buf)
4403 {
4404 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4405 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4406
4407 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4408 }
4409
4410 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4411 struct kobj_attribute *attr,
4412 const char *buf,
4413 size_t count)
4414 {
4415 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4416 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4417
4418 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4419 PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4420 buf,
4421 count);
4422 }
4423
4424 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4425 {
4426 umode_t umode = 0000;
4427
4428 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4429 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4430
4431 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4432 umode |= S_IWUSR;
4433
4434 return umode;
4435 }
4436
4437 static struct od_feature_set amdgpu_od_set = {
4438 .containers = {
4439 [0] = {
4440 .name = "fan_ctrl",
4441 .sub_feature = {
4442 [0] = {
4443 .name = "fan_curve",
4444 .ops = {
4445 .is_visible = fan_curve_visible,
4446 .show = fan_curve_show,
4447 .store = fan_curve_store,
4448 },
4449 },
4450 [1] = {
4451 .name = "acoustic_limit_rpm_threshold",
4452 .ops = {
4453 .is_visible = acoustic_limit_threshold_visible,
4454 .show = acoustic_limit_threshold_show,
4455 .store = acoustic_limit_threshold_store,
4456 },
4457 },
4458 [2] = {
4459 .name = "acoustic_target_rpm_threshold",
4460 .ops = {
4461 .is_visible = acoustic_target_threshold_visible,
4462 .show = acoustic_target_threshold_show,
4463 .store = acoustic_target_threshold_store,
4464 },
4465 },
4466 [3] = {
4467 .name = "fan_target_temperature",
4468 .ops = {
4469 .is_visible = fan_target_temperature_visible,
4470 .show = fan_target_temperature_show,
4471 .store = fan_target_temperature_store,
4472 },
4473 },
4474 [4] = {
4475 .name = "fan_minimum_pwm",
4476 .ops = {
4477 .is_visible = fan_minimum_pwm_visible,
4478 .show = fan_minimum_pwm_show,
4479 .store = fan_minimum_pwm_store,
4480 },
4481 },
4482 [5] = {
4483 .name = "fan_zero_rpm_enable",
4484 .ops = {
4485 .is_visible = fan_zero_rpm_enable_visible,
4486 .show = fan_zero_rpm_enable_show,
4487 .store = fan_zero_rpm_enable_store,
4488 },
4489 },
4490 [6] = {
4491 .name = "fan_zero_rpm_stop_temperature",
4492 .ops = {
4493 .is_visible = fan_zero_rpm_stop_temp_visible,
4494 .show = fan_zero_rpm_stop_temp_show,
4495 .store = fan_zero_rpm_stop_temp_store,
4496 },
4497 },
4498 },
4499 },
4500 },
4501 };
4502
4503 static void od_kobj_release(struct kobject *kobj)
4504 {
4505 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4506
4507 kfree(od_kobj);
4508 }
4509
4510 static const struct kobj_type od_ktype = {
4511 .release = od_kobj_release,
4512 .sysfs_ops = &kobj_sysfs_ops,
4513 };
4514
4515 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4516 {
4517 struct od_kobj *container, *container_next;
4518 struct od_attribute *attribute, *attribute_next;
4519
4520 if (list_empty(&adev->pm.od_kobj_list))
4521 return;
4522
4523 list_for_each_entry_safe(container, container_next,
4524 &adev->pm.od_kobj_list, entry) {
4525 list_del(&container->entry);
4526
4527 list_for_each_entry_safe(attribute, attribute_next,
4528 &container->attribute, entry) {
4529 list_del(&attribute->entry);
4530 sysfs_remove_file(&container->kobj,
4531 &attribute->attribute.attr);
4532 kfree(attribute);
4533 }
4534
4535 kobject_put(&container->kobj);
4536 }
4537 }
4538
4539 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4540 struct od_feature_ops *feature_ops)
4541 {
4542 umode_t mode;
4543
4544 if (!feature_ops->is_visible)
4545 return false;
4546
4547 /*
4548 * If the feature has neither user read nor user write mode set,
4549 * we can assume the feature is actually not supported and the
4550 * relevant sysfs interface should not be exposed.
4551 */
4552 mode = feature_ops->is_visible(adev);
4553 if (mode & (S_IRUSR | S_IWUSR))
4554 return true;
4555
4556 return false;
4557 }
4558
4559 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4560 struct od_feature_container *container)
4561 {
4562 int i;
4563
4564 /*
4565 * If there is no valid entry within the container, the container
4566 * is recognized as a self-contained container. A valid entry here
4567 * means one that has a valid name and is visible/supported by
4568 * the ASIC.
4569 */
4570 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4571 if (container->sub_feature[i].name &&
4572 amdgpu_is_od_feature_supported(adev,
4573 &container->sub_feature[i].ops))
4574 return false;
4575 }
4576
4577 return true;
4578 }
4579
4580 static int amdgpu_od_set_init(struct amdgpu_device *adev)
4581 {
4582 struct od_kobj *top_set, *sub_set;
4583 struct od_attribute *attribute;
4584 struct od_feature_container *container;
4585 struct od_feature_item *feature;
4586 int i, j;
4587 int ret;
4588
4589 /* Setup the top `gpu_od` directory which holds all other OD interfaces */
4590 top_set = kzalloc_obj(*top_set);
4591 if (!top_set)
4592 return -ENOMEM;
4593 list_add(&top_set->entry, &adev->pm.od_kobj_list);
4594
4595 ret = kobject_init_and_add(&top_set->kobj,
4596 &od_ktype,
4597 &adev->dev->kobj,
4598 "%s",
4599 "gpu_od");
4600 if (ret)
4601 goto err_out;
4602 INIT_LIST_HEAD(&top_set->attribute);
4603 top_set->priv = adev;
4604
4605 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4606 container = &amdgpu_od_set.containers[i];
4607
4608 if (!container->name)
4609 continue;
4610
4611 /*
4612 * If there are valid entries within the container, the container
4613 * will be presented as a sub directory and all the entries it holds
4614 * will be presented as plain files under it.
4615 * If there is no valid entry within the container, the container
4616 * itself will be presented as a plain file under the top `gpu_od` directory.
4617 */
4618 if (amdgpu_od_is_self_contained(adev, container)) {
4619 if (!amdgpu_is_od_feature_supported(adev,
4620 &container->ops))
4621 continue;
4622
4623 /*
4624 * The container is presented as a plain file under the top `gpu_od`
4625 * directory.
4626 */
4627 attribute = kzalloc_obj(*attribute);
4628 if (!attribute) {
4629 ret = -ENOMEM;
4630 goto err_out;
4631 }
4632 list_add(&attribute->entry, &top_set->attribute);
4633
4634 attribute->attribute.attr.mode =
4635 container->ops.is_visible(adev);
4636 attribute->attribute.attr.name = container->name;
4637 attribute->attribute.show =
4638 container->ops.show;
4639 attribute->attribute.store =
4640 container->ops.store;
4641 ret = sysfs_create_file(&top_set->kobj,
4642 &attribute->attribute.attr);
4643 if (ret)
4644 goto err_out;
4645 } else {
4646 /* The container is presented as a sub directory. */
4647 sub_set = kzalloc_obj(*sub_set);
4648 if (!sub_set) {
4649 ret = -ENOMEM;
4650 goto err_out;
4651 }
4652 list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4653
4654 ret = kobject_init_and_add(&sub_set->kobj,
4655 &od_ktype,
4656 &top_set->kobj,
4657 "%s",
4658 container->name);
4659 if (ret)
4660 goto err_out;
4661 INIT_LIST_HEAD(&sub_set->attribute);
4662 sub_set->priv = adev;
4663
4664 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4665 feature = &container->sub_feature[j];
4666 if (!feature->name)
4667 continue;
4668
4669 if (!amdgpu_is_od_feature_supported(adev,
4670 &feature->ops))
4671 continue;
4672
4673 /*
4674 * With the container presented as a sub directory, the entry within
4675 * it is presented as a plain file under the sub directory.
4676 */
4677 attribute = kzalloc_obj(*attribute);
4678 if (!attribute) {
4679 ret = -ENOMEM;
4680 goto err_out;
4681 }
4682 list_add(&attribute->entry, &sub_set->attribute);
4683
4684 attribute->attribute.attr.mode =
4685 feature->ops.is_visible(adev);
4686 attribute->attribute.attr.name = feature->name;
4687 attribute->attribute.show =
4688 feature->ops.show;
4689 attribute->attribute.store =
4690 feature->ops.store;
4691 ret = sysfs_create_file(&sub_set->kobj,
4692 &attribute->attribute.attr);
4693 if (ret)
4694 goto err_out;
4695 }
4696 }
4697 }
4698
4699 /*
4700 * If gpu_od is the only member in the list, that means gpu_od is an
4701 * empty directory, so remove it.
4702 */
4703 if (list_is_singular(&adev->pm.od_kobj_list))
4704 goto err_out;
4705
4706 return 0;
4707
4708 err_out:
4709 amdgpu_od_set_fini(adev);
4710
4711 return ret;
4712 }
4713
4714 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4715 {
4716 enum amdgpu_sriov_vf_mode mode;
4717 uint32_t mask = 0;
4718 uint32_t tmp;
4719 int ret;
4720
4721 if (adev->pm.sysfs_initialized)
4722 return 0;
4723
4724 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4725
4726 if (adev->pm.dpm_enabled == 0)
4727 return 0;
4728
4729 mode = amdgpu_virt_get_sriov_vf_mode(adev);
4730
4731 /* under multi-vf mode, none of the hwmon attributes are supported */
4732 if (mode != SRIOV_VF_MODE_MULTI_VF) {
4733 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4734 DRIVER_NAME, adev,
4735 hwmon_groups);
4736 if (IS_ERR(adev->pm.int_hwmon_dev)) {
4737 ret = PTR_ERR(adev->pm.int_hwmon_dev);
4738 dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4739 return ret;
4740 }
4741 }
4742
4743 switch (mode) {
4744 case SRIOV_VF_MODE_ONE_VF:
4745 mask = ATTR_FLAG_ONEVF;
4746 break;
4747 case SRIOV_VF_MODE_MULTI_VF:
4748 mask = 0;
4749 break;
4750 case SRIOV_VF_MODE_BARE_METAL:
4751 default:
4752 mask = ATTR_FLAG_MASK_ALL;
4753 break;
4754 }
4755
4756 ret = amdgpu_device_attr_create_groups(adev,
4757 amdgpu_device_attrs,
4758 ARRAY_SIZE(amdgpu_device_attrs),
4759 mask,
4760 &adev->pm.pm_attr_list);
4761 if (ret)
4762 goto err_out0;
4763
4764 if (amdgpu_dpm_is_overdrive_supported(adev)) {
4765 ret = amdgpu_od_set_init(adev);
4766 if (ret)
4767 goto err_out1;
4768 } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
4769 dev_info(adev->dev, "overdrive feature is not supported\n");
4770 }
4771
4772 if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
4773 -EOPNOTSUPP) {
4774 ret = devm_device_add_group(adev->dev,
4775 &amdgpu_pm_policy_attr_group);
4776 if (ret)
4777 goto err_out1;
4778 }
4779
4780 if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
4781 ret = devm_device_add_group(adev->dev,
4782 &amdgpu_board_attr_group);
4783 if (ret)
4784 goto err_out1;
4785 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
4786 (void *)&tmp) != -EOPNOTSUPP) {
4787 sysfs_add_file_to_group(&adev->dev->kobj,
4788 &dev_attr_cur_node_power_limit.attr,
4789 amdgpu_board_attr_group.name);
4790 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
4791 amdgpu_board_attr_group.name);
4792 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
4793 amdgpu_board_attr_group.name);
4794 sysfs_add_file_to_group(&adev->dev->kobj,
4795 &dev_attr_max_node_power_limit.attr,
4796 amdgpu_board_attr_group.name);
4797 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
4798 amdgpu_board_attr_group.name);
4799 }
4800 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT,
4801 (void *)&tmp) != -EOPNOTSUPP) {
4802 sysfs_add_file_to_group(&adev->dev->kobj,
4803 &dev_attr_baseboard_power_limit.attr,
4804 amdgpu_board_attr_group.name);
4805 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_baseboard_power.attr,
4806 amdgpu_board_attr_group.name);
4807 }
4808 }
4809
4810 adev->pm.sysfs_initialized = true;
4811
4812 return 0;
4813
4814 err_out1:
4815 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4816 err_out0:
4817 if (adev->pm.int_hwmon_dev)
4818 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4819
4820 return ret;
4821 }
4822
4823 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4824 {
4825 amdgpu_od_set_fini(adev);
4826
4827 if (adev->pm.int_hwmon_dev)
4828 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4829
4830 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4831 }
4832
4833 /*
4834 * Debugfs info
4835 */
4836 #if defined(CONFIG_DEBUG_FS)
4837
4838 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4839 struct amdgpu_device *adev)
4840 {
4841 uint16_t *p_val;
4842 uint32_t size;
4843 int i;
4844 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4845
4846 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4847 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
4848 GFP_KERNEL);
4849
4850 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4851 (void *)p_val, &size)) {
4852 for (i = 0; i < num_cpu_cores; i++)
4853 seq_printf(m, "\t%u MHz (CPU%d)\n",
4854 *(p_val + i), i);
4855 }
4856
4857 kfree(p_val);
4858 }
4859 }
4860
4861 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4862 {
4863 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4864 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4865 uint32_t value;
4866 uint64_t value64 = 0;
4867 uint32_t query = 0;
4868 int size;
4869
4870 /* GPU Clocks */
4871 size = sizeof(value);
4872 seq_printf(m, "GFX Clocks and Power:\n");
4873
4874 amdgpu_debugfs_prints_cpu_info(m, adev);
4875
4876 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4877 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4878 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4879 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4880 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4881 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4882 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4883 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4884 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4885 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4886 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4887 seq_printf(m, "\t%u mV (VDDNB)\n", value);
4888 size = sizeof(uint32_t);
4889 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4890 if (adev->flags & AMD_IS_APU)
4891 seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4892 else
4893 seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4894 }
4895 size = sizeof(uint32_t);
4896 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4897 if (adev->flags & AMD_IS_APU)
4898 seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4899 else
4900 seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4901 }
4902 size = sizeof(value);
4903 seq_printf(m, "\n");
4904
4905 /* GPU Temp */
4906 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4907 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4908
4909 /* GPU Load */
4910 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4911 seq_printf(m, "GPU Load: %u %%\n", value);
4912 /* MEM Load */
4913 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4914 seq_printf(m, "MEM Load: %u %%\n", value);
4915 /* VCN Load */
4916 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
4917 seq_printf(m, "VCN Load: %u %%\n", value);
4918
4919 seq_printf(m, "\n");
4920
4921 /* SMC feature mask */
4922 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4923 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4924
4925 /* ASICs newer than CHIP_VEGA20 support these sensors */
4926 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Powered down\n");
			} else {
				seq_printf(m, "VCN: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Powered down\n");
			} else {
				seq_printf(m, "UVD: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Powered down\n");
			} else {
				seq_printf(m, "VCE: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}
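
/*
 * Human readable names for the AMD_CG_SUPPORT_* clock gating flags,
 * consumed by amdgpu_parse_cg_state() below.
 */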
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};
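
/**
 * amdgpu_parse_cg_state - Print the clock gating state in human readable form.
 * @m: seq_file to print to.
 * @flags: AMD_CG_SUPPORT_* mask reported by the IP blocks.
 *
 * Walks the clocks[] table above and prints one "name: On/Off" line per
 * known clock gating feature, depending on whether its bit is set in @flags.
 */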
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}
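
/**
 * amdgpu_debugfs_pm_info_show - Show callback for the amdgpu_pm_info file.
 * @m: seq_file to print to.
 * @unused: unused.
 *
 * Prints the current performance level through the dpm backend, falling back
 * to the generic sensor dump in amdgpu_debugfs_pm_info_pp() when the backend
 * handler is unavailable or fails, then dumps the clock gating flags mask.
 */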
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	u64 flags = 0;
	int r;

	r = amdgpu_pm_get_access(adev);
	if (r < 0)
		return r;

	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
		if (r)
			goto out;
	}

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

out:
	amdgpu_pm_put_access(adev);

	return r;
}
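
/*
 * DEFINE_SHOW_ATTRIBUTE() generates amdgpu_debugfs_pm_info_fops, wiring the
 * show callback above into single_open() based file operations for debugfs.
 */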
DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);

/*
 * amdgpu_pm_prv_buffer_read - Read the debug memory region allocated to PMFW.
 *
 * Exposes the debug memory region which the PMFW reserves for its own use,
 * so it can be dumped from userspace through debugfs.
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	size_t smu_prv_buf_size;
	void *smu_prv_buf;
	int ret = 0;

	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
	if (ret)
		return ret;

	if (!smu_prv_buf || !smu_prv_buf_size)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
				       smu_prv_buf_size);
}

static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};

#endif
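
/**
 * amdgpu_debugfs_pm_init - Create the power management debugfs files.
 * @adev: Target device.
 *
 * Registers the amdgpu_pm_info file and, when the SMU reserved a private
 * buffer, the amdgpu_pm_prv_buffer file under the device's debugfs root
 * (typically /sys/kernel/debug/dri/<minor>/), then lets the dpm code add
 * its STB (Smart Trace Buffer) files. Does nothing unless dpm is enabled.
 */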
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}