/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
#define MAX_NUM_OF_FEATURES_PER_SUBSET		8
#define MAX_NUM_OF_SUBSETS			8

#define DEVICE_ATTR_IS(_name)	(attr_id == device_attr_id__##_name)

struct od_attribute {
	struct kobj_attribute	attribute;
	struct list_head	entry;
};

struct od_kobj {
	struct kobject		kobj;
	struct list_head	entry;
	struct list_head	attribute;
	void			*priv;
};

struct od_feature_ops {
	umode_t (*is_visible)(struct amdgpu_device *adev);
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};

struct od_feature_item {
	const char		*name;
	struct od_feature_ops	ops;
};

struct od_feature_container {
	char				*name;
	struct od_feature_ops		ops;
	struct od_feature_item		sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};

struct od_feature_set {
	struct od_feature_container	containers[MAX_NUM_OF_SUBSETS];
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM",
	"WINDOW_3D",
	"CAPPED",
	"UNCAPPED",
};
/**
 * amdgpu_pm_dev_state_check - Check if device can be accessed.
 * @adev: Target device.
 * @runpm: Check runpm status for suspend state checks.
 *
 * Checks the state of the @adev for access. Return 0 if the device is
 * accessible or a negative error code otherwise.
 */
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
	bool runpm_check = runpm ? adev->in_runpm : false;

	if (amdgpu_in_reset(adev))
		return -EBUSY;

	if (adev->in_suspend && !runpm_check)
		return -EBUSY;

	return 0;
}

/**
 * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to resume if
 * needed. Return 0 if the device is accessible or a negative error code
 * otherwise.
 */
static int amdgpu_pm_get_access(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	return pm_runtime_resume_and_get(adev->dev);
}

/**
 * amdgpu_pm_get_access_if_active - Check if device is active for access.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to determine
 * if the device is active. Allow access only if the device is active. Return
 * 0 if the device is accessible or a negative error code otherwise.
 */
static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
{
	int ret;

	/* Ignore runpm status. If device is in suspended state, deny access */
	ret = amdgpu_pm_dev_state_check(adev, false);
	if (ret)
		return ret;

	/*
	 * Allow access only if the device is active. If runtime pm is also
	 * disabled, as in kernels without CONFIG_PM, allow access.
	 */
	ret = pm_runtime_get_if_active(adev->dev);
	if (!ret)
		return -EPERM;

	return 0;
}

/**
 * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
 * @adev: Target device.
 *
 * Should be paired with amdgpu_pm_get_access* calls
 */
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
	pm_runtime_mark_last_busy(adev->dev);
	pm_runtime_put_autosuspend(adev->dev);
}

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
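 * For example (assuming the device is exposed as card0; the actual sysfs
 * path differs per system), a state can be selected and read back with:
 *
 * .. code-block:: bash
 *
 *    echo performance > /sys/class/drm/card0/device/power_dpm_state
 *    cat /sys/class/drm/card0/device/power_dpm_state
 *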
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_dpm_get_current_power_state(adev, &pm);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_power_state(adev, state);

	amdgpu_pm_put_access(adev);

	return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific work loads where you do
 * not want clock or power gating for clock fluctuation to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
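 * A minimal usage sketch (assuming card0; the sysfs path differs per
 * system):
 *
 * .. code-block:: bash
 *
 *    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 *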
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	level = amdgpu_dpm_get_performance_level(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     const char *buf,
							     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		amdgpu_pm_put_access(adev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
		return -EINVAL;
	}
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	amdgpu_pm_put_access(adev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	if (amdgpu_dpm_get_pp_num_states(adev, &data))
		memset(&data, 0, sizeof(data));

	amdgpu_pm_put_access(adev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
					 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
					 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
					 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
					 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_dpm_get_current_power_state(adev, &pm);

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->pm.pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	unsigned long idx;
	int ret;

	adev->pm.pp_force_state_enabled = false;

	if (strlen(buf) == 1)
		return count;

	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
		return -EINVAL;

	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	if (ret)
		goto err_out;

	state = data.states[idx];

	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		if (ret)
			goto err_out;

		adev->pm.pp_force_state_enabled = true;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return ret;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current power play table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
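 * For example (assuming card0; paths differ per system), the current
 * table could be saved and later written back with:
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *    cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table
 *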
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_pp_table(adev, &table);

	amdgpu_pm_put_access(adev);

	if (size <= 0)
		return size;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The pp_od_clk_voltage is used for
 * this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them. A
 * consolidated example is sketched at the end of this section.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve. This is
 *   available for Vega20 and NV1X.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
 *   voltage calculation can be illustrated by "voltage = voltage
 *   calculated from v/f curve + overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, voltage curve points
 *   or voltage offset labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
 *   "m 1 800" will update the maximum mclk to be 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 *   For the sclk voltage curve supported by Vega20 and NV1X, enter the new
 *   values by writing a string that contains "vc point clock voltage"
 *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
 *   600" will update point1 with clock set as 300 MHz and voltage as 600 mV.
 *   "vc 2 1000 1000" will update point3 with clock set as 1000 MHz and
 *   voltage 1000 mV.
 *
 *   For the voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs, enter the new value by writing a
 *   string that contains "vo offset". E.g., "vo -10" will update the extra
 *   voltage offset applied to the whole v/f curve line as -10 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
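 * Putting it together, a sketch of the pre-Vega20 flow described above,
 * reusing the example values from this section (assuming card0; the
 * sysfs path differs per system):
 *
 * .. code-block:: bash
 *
 *    cd /sys/class/drm/card0/device
 *    echo manual > power_dpm_force_performance_level
 *    echo "s 1 500 820" > pp_od_clk_voltage
 *    echo "m 0 350 810" > pp_od_clk_voltage
 *    echo "c" > pp_od_clk_voltage
 *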
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (count > 127 || count == 0)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = 0;

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
					      type,
					      parameter,
					      parameter_size))
		goto err_out;

	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
					  parameter, parameter_size))
		goto err_out;

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (amdgpu_dpm_dispatch_task(adev,
					     AMD_PP_TASK_READJUST_POWER_STATE,
					     NULL))
			goto err_out;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret;
	enum pp_clock_type od_clocks[6] = {
		OD_SCLK,
		OD_MCLK,
		OD_VDDC_CURVE,
		OD_RANGE,
		OD_VDDGFX_OFFSET,
		OD_CCLK,
	};
	uint clk_index;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
		if (ret)
			break;
	}
	if (ret == -ENOENT) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this. It is
 * only available on Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit from the original ppfeature masks and write the
 * new ppfeature masks.
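 *
 * A sketch of the flow (assuming card0; the mask below is purely
 * illustrative and should be derived from the masks read back first):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/pp_features
 *    echo 0x3fff > /sys/class/drm/card0/device/pp_features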
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels. If deep sleep is
 * applied to a clock, the level will be denoted by a special level 'S:'
 * E.g., ::
 *
 *	S: 19Mhz *
 *	0: 615Mhz
 *	1: 800Mhz
 *	2: 888Mhz
 *	3: 1000Mhz
 *
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a string that
 * contains the indices of the levels to enable to the corresponding
 * pp_dpm_sclk/mclk/pcie file.
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not supported at present
 */

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret = 0;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
	if (ret == -ENOENT)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       const char *buf,
				       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_force_clock_level(adev, type, mask);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_sclk_od(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

	amdgpu_pm_put_access(adev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_mclk_od(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	amdgpu_pm_put_access(adev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family. Additionally,
 * you can apply the custom heuristics to different clock domains. Each
 * clock domain is considered a distinct operation, so if you modify the
 * gfxclk heuristics and then the memclk heuristics, all of the
 * custom heuristics will be retained until you switch to another profile.
 *
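 * For example (assuming card0; profile numbers and heuristic parameters
 * come from reading the file first and vary by ASIC family):
 *
 * .. code-block:: bash
 *
 *    echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    cat /sys/class/drm/card0/device/pp_power_profile_mode
 *    echo 1 > /sys/class/drm/card0/device/pp_power_profile_mode
 *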
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			if (!tmp_str)
				break;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	amdgpu_pm_put_access(adev);

	if (!ret)
		return count;

	return -EINVAL;
}

static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
					enum amd_pp_sensors sensor,
					void *query)
{
	int r, size = sizeof(uint32_t);

	r = amdgpu_pm_get_access_if_active(adev);
	if (r)
		return r;

	/* get the sensor value */
	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);

	amdgpu_pm_put_access(adev);

	return r;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: vcn_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VCN
 * is as a percentage. The file vcn_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
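 *
 * The file prints the two message counters followed by mps, so a rough
 * upper bound on the bytes moved in the last second is
 * (received + sent) * mps. A sketch (assuming card0):
 *
 * .. code-block:: bash
 *
 *    read recv sent mps < /sys/class/drm/card0/device/pcie_bw
 *    echo $(( (recv + sent) * mps ))   # approximate bytes per second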
 */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%llu %llu %i\n",
			  count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);

	return 0;
}

/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
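 *
 * For example (assuming card0; the sysfs path differs per system):
 *
 * .. code-block:: bash
 *
 *    # log at most once every 60 seconds
 *    echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *    # disable thermal throttling logging
 *    echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging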
 */
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			  adev_to_drm(adev)->unique,
			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			  adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		ratelimit_state_reset_interval(&adev->throttling_logging_rs,
					       (throttling_logging_interval - 1) * HZ);
		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

/**
 * DOC: apu_thermal_cap
 *
 * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
 * limit temperature in millidegrees Celsius.
 *
 * Reading back the file shows you the core limit value.
 *
 * Writing an integer to the file sets a new thermal limit. The value
 * should be between 0 and 100. If the value is less than 0 or greater
 * than 100, then the write request will be ignored.
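 *
 * For example (assuming card0; the value below is purely illustrative):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/apu_thermal_cap
 *    echo 95 > /sys/class/drm/card0/device/apu_thermal_cap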
 */
static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	int ret, size;
	u32 limit;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
	if (!ret)
		size = sysfs_emit(buf, "%u\n", limit);
	else
		size = sysfs_emit(buf, "failed to get thermal limit\n");

	amdgpu_pm_put_access(adev);

	return size;
}

static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	int ret;
	u32 value;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	ret = kstrtou32(buf, 10, &value);
	if (ret)
		return ret;

	if (value > 100) {
		dev_err(dev, "Invalid argument !\n");
		return -EINVAL;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
	if (ret) {
		amdgpu_pm_put_access(adev);
		dev_err(dev, "failed to update thermal limit\n");
		return ret;
	}

	amdgpu_pm_put_access(adev);

	return count;
}

static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
					 struct amdgpu_device_attr *attr,
					 uint32_t mask,
					 enum amdgpu_device_attr_states *states)
{
	if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static ssize_t amdgpu_get_pm_metrics(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * The data includes temperature, frequency, engine utilization,
 * power consumption, throttler status, fan speed and CPU core statistics
 * (available for APUs only). That is, it will give a snapshot of all sensors
 * at the same time.
 */
1749 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1750 struct device_attribute *attr,
1751 char *buf)
1752 {
1753 struct drm_device *ddev = dev_get_drvdata(dev);
1754 struct amdgpu_device *adev = drm_to_adev(ddev);
1755 void *gpu_metrics;
1756 ssize_t size = 0;
1757 int ret;
1758
1759 ret = amdgpu_pm_get_access_if_active(adev);
1760 if (ret)
1761 return ret;
1762
1763 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1764 if (size <= 0)
1765 goto out;
1766
1767 if (size >= PAGE_SIZE)
1768 size = PAGE_SIZE - 1;
1769
1770 memcpy(buf, gpu_metrics, size);
1771
1772 out:
1773 amdgpu_pm_put_access(adev);
1774
1775 return size;
1776 }
1777
1778 static int amdgpu_show_powershift_percent(struct device *dev,
1779 char *buf, enum amd_pp_sensors sensor)
1780 {
1781 struct drm_device *ddev = dev_get_drvdata(dev);
1782 struct amdgpu_device *adev = drm_to_adev(ddev);
1783 uint32_t ss_power;
1784 int r = 0, i;
1785
1786 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1787 if (r == -EOPNOTSUPP) {
1788 /* sensor not available on dGPU, try to read from APU */
1789 adev = NULL;
1790 mutex_lock(&mgpu_info.mutex);
1791 for (i = 0; i < mgpu_info.num_gpu; i++) {
1792 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1793 adev = mgpu_info.gpu_ins[i].adev;
1794 break;
1795 }
1796 }
1797 mutex_unlock(&mgpu_info.mutex);
1798 if (adev)
1799 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1800 }
1801
1802 if (r)
1803 return r;
1804
1805 return sysfs_emit(buf, "%u%%\n", ss_power);
1806 }
1807
1808 /**
1809 * DOC: smartshift_apu_power
1810 *
1811 * The amdgpu driver provides a sysfs API for reporting the APU power
1812 * shift in percentage if the platform supports smartshift. A value of 0
1813 * means that there is no powershift, and a value in the range [1-100]
1814 * means that power is shifted to the APU; the boost percentage is
1815 * relative to the APU power limit on the platform.
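*
* For example, to query the current APU power shift:
*
* .. code-block:: console
*
*      cat /sys/bus/pci/devices/.../smartshift_apu_power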
1816 */
1817
1818 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1819 char *buf)
1820 {
1821 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1822 }
1823
1824 /**
1825 * DOC: smartshift_dgpu_power
1826 *
1827 * The amdgpu driver provides a sysfs API for reporting the dGPU power
1828 * shift in percentage if the platform supports smartshift. A value of 0
1829 * means that there is no powershift, and a value in the range [1-100]
1830 * means that power is shifted to the dGPU; the boost percentage is
1831 * relative to the dGPU power limit on the platform.
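*
* For example, to query the current dGPU power shift:
*
* .. code-block:: console
*
*      cat /sys/bus/pci/devices/.../smartshift_dgpu_power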
1832 */
1833
1834 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1835 char *buf)
1836 {
1837 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1838 }
1839
1840 /**
1841 * DOC: smartshift_bias
1842 *
1843 * The amdgpu driver provides a sysfs API for reporting and adjusting the
1844 * smartshift (SS2.0) bias level. The value ranges from -100 to 100
1845 * and the default is 0. -100 sets maximum preference to the APU
1846 * and 100 sets maximum preference to the dGPU.
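*
* For example, to bias power fully toward the dGPU and read the value back:
*
* .. code-block:: console
*
*      echo 100 > /sys/bus/pci/devices/.../smartshift_bias
*      cat /sys/bus/pci/devices/.../smartshift_bias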
1847 */
1848
1849 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1850 struct device_attribute *attr,
1851 char *buf)
1852 {
1853 int r = 0;
1854
1855 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1856
1857 return r;
1858 }
1859
1860 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1861 struct device_attribute *attr,
1862 const char *buf, size_t count)
1863 {
1864 struct drm_device *ddev = dev_get_drvdata(dev);
1865 struct amdgpu_device *adev = drm_to_adev(ddev);
1866 int r = 0;
1867 int bias = 0;
1868
1869 r = kstrtoint(buf, 10, &bias);
1870 if (r)
1871 return r;
1872
1873 r = amdgpu_pm_get_access(adev);
1874 if (r < 0)
1875 return r;
1876
1877 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1878 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1879 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1880 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1881
1882 amdgpu_smartshift_bias = bias;
1883 r = count;
1884
1885 /* TODO: update bias level with SMU message */
1886
1888 amdgpu_pm_put_access(adev);
1889
1890 return r;
1891 }
1892
1893 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1894 uint32_t mask, enum amdgpu_device_attr_states *states)
1895 {
1896 if (!amdgpu_device_supports_smart_shift(adev))
1897 *states = ATTR_STATE_UNSUPPORTED;
1898
1899 return 0;
1900 }
1901
1902 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1903 uint32_t mask, enum amdgpu_device_attr_states *states)
1904 {
1905 uint32_t ss_power;
1906
1907 if (!amdgpu_device_supports_smart_shift(adev))
1908 *states = ATTR_STATE_UNSUPPORTED;
1909 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1910 (void *)&ss_power))
1911 *states = ATTR_STATE_UNSUPPORTED;
1912 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1913 (void *)&ss_power))
1914 *states = ATTR_STATE_UNSUPPORTED;
1915
1916 return 0;
1917 }
1918
1919 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1920 uint32_t mask, enum amdgpu_device_attr_states *states)
1921 {
1922 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1923
1924 *states = ATTR_STATE_SUPPORTED;
1925
1926 if (!amdgpu_dpm_is_overdrive_supported(adev)) {
1927 *states = ATTR_STATE_UNSUPPORTED;
1928 return 0;
1929 }
1930
1931 /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
1932 if (gc_ver == IP_VERSION(9, 4, 3) ||
1933 gc_ver == IP_VERSION(9, 4, 4) ||
1934 gc_ver == IP_VERSION(9, 5, 0)) {
1935 if (amdgpu_sriov_multi_vf_mode(adev))
1936 *states = ATTR_STATE_UNSUPPORTED;
1937 return 0;
1938 }
1939
1940 if (!(attr->flags & mask))
1941 *states = ATTR_STATE_UNSUPPORTED;
1942
1943 return 0;
1944 }
1945
1946 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1947 uint32_t mask, enum amdgpu_device_attr_states *states)
1948 {
1949 struct device_attribute *dev_attr = &attr->dev_attr;
1950 uint32_t gc_ver;
1951
1952 *states = ATTR_STATE_SUPPORTED;
1953
1954 if (!(attr->flags & mask)) {
1955 *states = ATTR_STATE_UNSUPPORTED;
1956 return 0;
1957 }
1958
1959 gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1960 /* dcefclk node is not available on gfx 11.0.3 sriov */
1961 if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
1962 gc_ver < IP_VERSION(9, 0, 0) ||
1963 !amdgpu_device_has_display_hardware(adev))
1964 *states = ATTR_STATE_UNSUPPORTED;
1965
1966 /* SMU MP1 does not support dcefclk level setting,
1967 * setting should not be allowed from VF if not in one VF mode.
1968 */
1969 if (gc_ver >= IP_VERSION(10, 0, 0) ||
1970 (amdgpu_sriov_multi_vf_mode(adev))) {
1971 dev_attr->attr.mode &= ~S_IWUGO;
1972 dev_attr->store = NULL;
1973 }
1974
1975 return 0;
1976 }
1977
1978 static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1979 uint32_t mask, enum amdgpu_device_attr_states *states)
1980 {
1981 struct device_attribute *dev_attr = &attr->dev_attr;
1982 enum amdgpu_device_attr_id attr_id = attr->attr_id;
1983 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
1984 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1985
1986 *states = ATTR_STATE_SUPPORTED;
1987
1988 if (!(attr->flags & mask)) {
1989 *states = ATTR_STATE_UNSUPPORTED;
1990 return 0;
1991 }
1992
1993 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1994 if (gc_ver < IP_VERSION(9, 0, 0))
1995 *states = ATTR_STATE_UNSUPPORTED;
1996 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1997 if (mp1_ver < IP_VERSION(10, 0, 0))
1998 *states = ATTR_STATE_UNSUPPORTED;
1999 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2000 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2001 gc_ver == IP_VERSION(10, 3, 3) ||
2002 gc_ver == IP_VERSION(10, 3, 6) ||
2003 gc_ver == IP_VERSION(10, 3, 7) ||
2004 gc_ver == IP_VERSION(10, 3, 0) ||
2005 gc_ver == IP_VERSION(10, 1, 2) ||
2006 gc_ver == IP_VERSION(11, 0, 0) ||
2007 gc_ver == IP_VERSION(11, 0, 1) ||
2008 gc_ver == IP_VERSION(11, 0, 4) ||
2009 gc_ver == IP_VERSION(11, 5, 0) ||
2010 gc_ver == IP_VERSION(11, 0, 2) ||
2011 gc_ver == IP_VERSION(11, 0, 3) ||
2012 gc_ver == IP_VERSION(9, 4, 3) ||
2013 gc_ver == IP_VERSION(9, 4, 4) ||
2014 gc_ver == IP_VERSION(9, 5, 0)))
2015 *states = ATTR_STATE_UNSUPPORTED;
2016 } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2017 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2018 gc_ver == IP_VERSION(10, 3, 0) ||
2019 gc_ver == IP_VERSION(11, 0, 2) ||
2020 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2021 *states = ATTR_STATE_UNSUPPORTED;
2022 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2023 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2024 gc_ver == IP_VERSION(10, 3, 3) ||
2025 gc_ver == IP_VERSION(10, 3, 6) ||
2026 gc_ver == IP_VERSION(10, 3, 7) ||
2027 gc_ver == IP_VERSION(10, 3, 0) ||
2028 gc_ver == IP_VERSION(10, 1, 2) ||
2029 gc_ver == IP_VERSION(11, 0, 0) ||
2030 gc_ver == IP_VERSION(11, 0, 1) ||
2031 gc_ver == IP_VERSION(11, 0, 4) ||
2032 gc_ver == IP_VERSION(11, 5, 0) ||
2033 gc_ver == IP_VERSION(11, 0, 2) ||
2034 gc_ver == IP_VERSION(11, 0, 3) ||
2035 gc_ver == IP_VERSION(9, 4, 3) ||
2036 gc_ver == IP_VERSION(9, 4, 4) ||
2037 gc_ver == IP_VERSION(9, 5, 0)))
2038 *states = ATTR_STATE_UNSUPPORTED;
2039 } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2040 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2041 gc_ver == IP_VERSION(10, 3, 0) ||
2042 gc_ver == IP_VERSION(11, 0, 2) ||
2043 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2044 *states = ATTR_STATE_UNSUPPORTED;
2045 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2046 if (gc_ver == IP_VERSION(9, 4, 2) ||
2047 gc_ver == IP_VERSION(9, 4, 3) ||
2048 gc_ver == IP_VERSION(9, 4, 4) ||
2049 gc_ver == IP_VERSION(9, 5, 0))
2050 *states = ATTR_STATE_UNSUPPORTED;
2051 }
2052
2053 switch (gc_ver) {
2054 case IP_VERSION(9, 4, 1):
2055 case IP_VERSION(9, 4, 2):
2056 /* the Mi series card does not support standalone mclk/socclk/fclk level setting */
2057 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2058 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2059 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2060 dev_attr->attr.mode &= ~S_IWUGO;
2061 dev_attr->store = NULL;
2062 }
2063 break;
2064 default:
2065 break;
2066 }
2067
2068 /* setting should not be allowed from VF if not in one VF mode */
2069 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
2070 dev_attr->attr.mode &= ~S_IWUGO;
2071 dev_attr->store = NULL;
2072 }
2073
2074 return 0;
2075 }
2076
2077 /**
2078 * DOC: board
2079 *
2080 * Certain SOCs can support reporting of various board attributes. This is useful
2081 * for user applications that monitor board related attributes.
2082 *
2083 * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
2084 * seven types of attributes are reported. Baseboard temperature and
2085 * gpu board temperature are reported as binary files. Npm status, current node power limit,
2086 * max node power limit, node power and global ppt residency are reported as ASCII text files.
2087 *
2088 * .. code-block:: console
2089 *
2090 * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
2091 *
2092 * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
2093 *
2094 * hexdump /sys/bus/pci/devices/.../board/npm_status
2095 *
2096 * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
2097 *
2098 * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
2099 *
2100 * hexdump /sys/bus/pci/devices/.../board/node_power
2101 *
2102 * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
2103 */
2104
2105 /**
2106 * DOC: baseboard_temp
2107 *
2108 * The amdgpu driver provides a sysfs API for retrieving current baseboard
2109 * temperature metrics data. The file baseboard_temp is used for this.
2110 * Reading the file will dump all the current baseboard temperature metrics data.
2111 */
2112 static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
2113 struct device_attribute *attr, char *buf)
2114 {
2115 struct drm_device *ddev = dev_get_drvdata(dev);
2116 struct amdgpu_device *adev = drm_to_adev(ddev);
2117 ssize_t size;
2118 int ret;
2119
2120 ret = amdgpu_pm_get_access_if_active(adev);
2121 if (ret)
2122 return ret;
2123
2124 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
2125 if (size <= 0)
2126 goto out;
2127 if (size >= PAGE_SIZE) {
2128 ret = -ENOSPC;
2129 goto out;
2130 }
2131
2132 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
2133
2134 out:
2135 amdgpu_pm_put_access(adev);
2136
2137 if (ret)
2138 return ret;
2139
2140 return size;
2141 }
2142
2143 /**
2144 * DOC: gpuboard_temp
2145 *
2146 * The amdgpu driver provides a sysfs API for retrieving current gpuboard
2147 * temperature metrics data. The file gpuboard_temp is used for this.
2148 * Reading the file will dump all the current gpuboard temperature metrics data.
2149 */
2150 static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
2151 struct device_attribute *attr, char *buf)
2152 {
2153 struct drm_device *ddev = dev_get_drvdata(dev);
2154 struct amdgpu_device *adev = drm_to_adev(ddev);
2155 ssize_t size;
2156 int ret;
2157
2158 ret = amdgpu_pm_get_access_if_active(adev);
2159 if (ret)
2160 return ret;
2161
2162 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
2163 if (size <= 0)
2164 goto out;
2165 if (size >= PAGE_SIZE) {
2166 ret = -ENOSPC;
2167 goto out;
2168 }
2169
2170 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
2171
2172 out:
2173 amdgpu_pm_put_access(adev);
2174
2175 if (ret)
2176 return ret;
2177
2178 return size;
2179 }
2180
2181 /**
2182 * DOC: cur_node_power_limit
2183 *
2184 * The amdgpu driver provides a sysfs API for retrieving current node power limit.
2185 * The file cur_node_power_limit is used for this.
2186 */
2187 static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
2188 struct device_attribute *attr, char *buf)
2189 {
2190 struct drm_device *ddev = dev_get_drvdata(dev);
2191 struct amdgpu_device *adev = drm_to_adev(ddev);
2192 u32 nplimit;
2193 int r;
2194
2195 /* get the current node power limit */
2196 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
2197 (void *)&nplimit);
2198 if (r)
2199 return r;
2200
2201 return sysfs_emit(buf, "%u\n", nplimit);
2202 }
2203
2204 /**
2205 * DOC: node_power
2206 *
2207 * The amdgpu driver provides a sysfs API for retrieving current node power.
2208 * The file node_power is used for this.
2209 */
2210 static ssize_t amdgpu_show_node_power(struct device *dev,
2211 struct device_attribute *attr, char *buf)
2212 {
2213 struct drm_device *ddev = dev_get_drvdata(dev);
2214 struct amdgpu_device *adev = drm_to_adev(ddev);
2215 u32 npower;
2216 int r;
2217
2218 /* get the node power */
2219 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2220 (void *)&npower);
2221 if (r)
2222 return r;
2223
2224 return sysfs_emit(buf, "%u\n", npower);
2225 }
2226
2227 /**
2228 * DOC: npm_status
2229 *
2230 * The amdgpu driver provides a sysfs API for retrieving current node power management status.
2231 * The file npm_status is used for this. It shows the status as enabled or disabled based on
2232 * the current node power value. If node power is zero, the status is disabled; otherwise it is enabled.
2233 */
2234 static ssize_t amdgpu_show_npm_status(struct device *dev,
2235 struct device_attribute *attr, char *buf)
2236 {
2237 struct drm_device *ddev = dev_get_drvdata(dev);
2238 struct amdgpu_device *adev = drm_to_adev(ddev);
2239 u32 npower;
2240 int r;
2241
2242 /* get the node power */
2243 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2244 (void *)&npower);
2245 if (r)
2246 return r;
2247
2248 return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
2249 }
2250
2251 /**
2252 * DOC: global_ppt_resid
2253 *
2254 * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
2255 * The file global_ppt_resid is used for this.
2256 */
2257 static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
2258 struct device_attribute *attr, char *buf)
2259 {
2260 struct drm_device *ddev = dev_get_drvdata(dev);
2261 struct amdgpu_device *adev = drm_to_adev(ddev);
2262 u32 gpptresid;
2263 int r;
2264
2265 /* get the global ppt residency */
2266 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
2267 (void *)&gpptresid);
2268 if (r)
2269 return r;
2270
2271 return sysfs_emit(buf, "%u\n", gpptresid);
2272 }
2273
2274 /**
2275 * DOC: max_node_power_limit
2276 *
2277 * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
2278 * The file max_node_power_limit is used for this.
2279 */
2280 static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
2281 struct device_attribute *attr, char *buf)
2282 {
2283 struct drm_device *ddev = dev_get_drvdata(dev);
2284 struct amdgpu_device *adev = drm_to_adev(ddev);
2285 u32 max_nplimit;
2286 int r;
2287
2288 /* get the max node power limit */
2289 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
2290 (void *)&max_nplimit);
2291 if (r)
2292 return r;
2293
2294 return sysfs_emit(buf, "%u\n", max_nplimit);
2295 }
2296
2297 static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
2298 static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
2299 static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
2300 static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
2301 static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
2302 static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
2303 static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
2304
2305 static struct attribute *board_attrs[] = {
2306 &dev_attr_baseboard_temp.attr,
2307 &dev_attr_gpuboard_temp.attr,
2308 NULL
2309 };
2310
2311 static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
2312 {
2313 struct device *dev = kobj_to_dev(kobj);
2314 struct drm_device *ddev = dev_get_drvdata(dev);
2315 struct amdgpu_device *adev = drm_to_adev(ddev);
2316
2317 if (attr == &dev_attr_baseboard_temp.attr) {
2318 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
2319 return 0;
2320 }
2321
2322 if (attr == &dev_attr_gpuboard_temp.attr) {
2323 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
2324 return 0;
2325 }
2326
2327 return attr->mode;
2328 }
2329
2330 const struct attribute_group amdgpu_board_attr_group = {
2331 .name = "board",
2332 .attrs = board_attrs,
2333 .is_visible = amdgpu_board_attr_visible,
2334 };
2335
2336 /* pm policy attributes */
2337 struct amdgpu_pm_policy_attr {
2338 struct device_attribute dev_attr;
2339 enum pp_pm_policy id;
2340 };
2341
2342 /**
2343 * DOC: pm_policy
2344 *
2345 * Certain SOCs can support different power policies to optimize application
2346 * performance. However, such a policy is provided only at the SOC level and
2347 * not at a per-process level. This is especially useful when the entire SOC
2348 * is utilized for a dedicated workload.
2349 *
2350 * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2351 * only two types of policies are supported through this interface.
2352 *
2353 * Pstate Policy Selection - This is to select different Pstate profiles which
2354 * decides clock/throttling preferences.
2355 *
2356 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2357 * this helps to select policy to be applied for per link power down.
2358 *
2359 * The list of available policies and policy levels varies between SOCs. They
2360 * can be viewed under the pm_policy node directory. If the SOC doesn't support
2361 * any policy, this node won't be available. The different policies supported
2362 * will be available as separate nodes under pm_policy.
2363 *
2364 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2365 *
2366 * Reading the policy file shows the different levels supported. The level which
2367 * is currently applied is denoted by * (asterisk). E.g.,
2368 *
2369 * .. code-block:: console
2370 *
2371 * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2372 * 0 : soc_pstate_default
2373 * 1 : soc_pstate_0
2374 * 2 : soc_pstate_1*
2375 * 3 : soc_pstate_2
2376 *
2377 * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2378 * 0 : plpd_disallow
2379 * 1 : plpd_default
2380 * 2 : plpd_optimized*
2381 *
2382 * To apply a specific policy
2383 *
2384 * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
2385 *
2386 * For the levels listed in the example above, to select "plpd_optimized" for
2387 * XGMI and "soc_pstate_2" for soc pstate policy -
2388 *
2389 * .. code-block:: console
2390 *
2391 * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2392 * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2393 *
2394 */
2395 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2396 struct device_attribute *attr,
2397 char *buf)
2398 {
2399 struct drm_device *ddev = dev_get_drvdata(dev);
2400 struct amdgpu_device *adev = drm_to_adev(ddev);
2401 struct amdgpu_pm_policy_attr *policy_attr;
2402
2403 policy_attr =
2404 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2405
2406 return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2407 }
2408
2409 static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
2410 struct device_attribute *attr,
2411 const char *buf, size_t count)
2412 {
2413 struct drm_device *ddev = dev_get_drvdata(dev);
2414 struct amdgpu_device *adev = drm_to_adev(ddev);
2415 struct amdgpu_pm_policy_attr *policy_attr;
2416 int ret, num_params = 0;
2417 char delimiter[] = " \n\t";
2418 char tmp_buf[128];
2419 char *tmp, *param;
2420 long val;
2421
2422 count = min(count, sizeof(tmp_buf));
2423 memcpy(tmp_buf, buf, count);
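/* terminate the local copy, overwriting the last byte (normally the trailing newline from sysfs) */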
2424 tmp_buf[count - 1] = '\0';
2425 tmp = tmp_buf;
2426
2427 tmp = skip_spaces(tmp);
2428 while ((param = strsep(&tmp, delimiter))) {
2429 if (!strlen(param)) {
2430 tmp = skip_spaces(tmp);
2431 continue;
2432 }
2433 ret = kstrtol(param, 0, &val);
2434 if (ret)
2435 return -EINVAL;
2436 num_params++;
2437 if (num_params > 1)
2438 return -EINVAL;
2439 }
2440
2441 if (num_params != 1)
2442 return -EINVAL;
2443
2444 policy_attr =
2445 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2446
2447 ret = amdgpu_pm_get_access(adev);
2448 if (ret < 0)
2449 return ret;
2450
2451 ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
2452
2453 amdgpu_pm_put_access(adev);
2454
2455 if (ret)
2456 return ret;
2457
2458 return count;
2459 }
2460
2461 #define AMDGPU_PM_POLICY_ATTR(_name, _id) \
2462 static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \
2463 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
2464 amdgpu_set_pm_policy_attr), \
2465 .id = PP_PM_POLICY_##_id, \
2466 };
2467
2468 #define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
2469
2470 AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
2471 AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
2472
2473 static struct attribute *pm_policy_attrs[] = {
2474 &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
2475 &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
2476 NULL
2477 };
2478
2479 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2480 struct attribute *attr, int n)
2481 {
2482 struct device *dev = kobj_to_dev(kobj);
2483 struct drm_device *ddev = dev_get_drvdata(dev);
2484 struct amdgpu_device *adev = drm_to_adev(ddev);
2485 struct amdgpu_pm_policy_attr *policy_attr;
2486
2487 policy_attr =
2488 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2489
2490 if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2491 -ENOENT)
2492 return 0;
2493
2494 return attr->mode;
2495 }
2496
2497 const struct attribute_group amdgpu_pm_policy_attr_group = {
2498 .name = "pm_policy",
2499 .attrs = pm_policy_attrs,
2500 .is_visible = amdgpu_pm_policy_attr_visible,
2501 };
2502
2503 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2504 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2505 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2506 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2507 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2508 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2509 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2510 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2511 .attr_update = pp_dpm_clk_default_attr_update),
2512 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2513 .attr_update = pp_dpm_clk_default_attr_update),
2514 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2515 .attr_update = pp_dpm_clk_default_attr_update),
2516 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2517 .attr_update = pp_dpm_clk_default_attr_update),
2518 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2519 .attr_update = pp_dpm_clk_default_attr_update),
2520 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2521 .attr_update = pp_dpm_clk_default_attr_update),
2522 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2523 .attr_update = pp_dpm_clk_default_attr_update),
2524 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2525 .attr_update = pp_dpm_clk_default_attr_update),
2526 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2527 .attr_update = pp_dpm_dcefclk_attr_update),
2528 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2529 .attr_update = pp_dpm_clk_default_attr_update),
2530 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2531 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2532 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2533 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
2534 .attr_update = pp_od_clk_voltage_attr_update),
2535 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2536 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2537 AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2538 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2539 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2540 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2541 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2542 AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2543 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2544 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
2545 .attr_update = ss_power_attr_update),
2546 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
2547 .attr_update = ss_power_attr_update),
2548 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
2549 .attr_update = ss_bias_attr_update),
2550 AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
2551 .attr_update = amdgpu_pm_metrics_attr_update),
2552 };
2553
2554 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2555 uint32_t mask, enum amdgpu_device_attr_states *states)
2556 {
2557 struct device_attribute *dev_attr = &attr->dev_attr;
2558 enum amdgpu_device_attr_id attr_id = attr->attr_id;
2559 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2560
2561 if (!(attr->flags & mask)) {
2562 *states = ATTR_STATE_UNSUPPORTED;
2563 return 0;
2564 }
2565
2566 if (DEVICE_ATTR_IS(mem_busy_percent)) {
2567 if ((adev->flags & AMD_IS_APU &&
2568 gc_ver != IP_VERSION(9, 4, 3)) ||
2569 gc_ver == IP_VERSION(9, 0, 1))
2570 *states = ATTR_STATE_UNSUPPORTED;
2571 } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
2572 if (!(gc_ver == IP_VERSION(9, 3, 0) ||
2573 gc_ver == IP_VERSION(10, 3, 1) ||
2574 gc_ver == IP_VERSION(10, 3, 3) ||
2575 gc_ver == IP_VERSION(10, 3, 6) ||
2576 gc_ver == IP_VERSION(10, 3, 7) ||
2577 gc_ver == IP_VERSION(11, 0, 0) ||
2578 gc_ver == IP_VERSION(11, 0, 1) ||
2579 gc_ver == IP_VERSION(11, 0, 2) ||
2580 gc_ver == IP_VERSION(11, 0, 3) ||
2581 gc_ver == IP_VERSION(11, 0, 4) ||
2582 gc_ver == IP_VERSION(11, 5, 0) ||
2583 gc_ver == IP_VERSION(11, 5, 1) ||
2584 gc_ver == IP_VERSION(11, 5, 2) ||
2585 gc_ver == IP_VERSION(11, 5, 3) ||
2586 gc_ver == IP_VERSION(12, 0, 0) ||
2587 gc_ver == IP_VERSION(12, 0, 1)))
2588 *states = ATTR_STATE_UNSUPPORTED;
2589 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2590 /* PCIe Perf counters won't work on APU nodes */
2591 if (adev->flags & AMD_IS_APU ||
2592 !adev->asic_funcs->get_pcie_usage)
2593 *states = ATTR_STATE_UNSUPPORTED;
2594 } else if (DEVICE_ATTR_IS(unique_id)) {
2595 switch (gc_ver) {
2596 case IP_VERSION(9, 0, 1):
2597 case IP_VERSION(9, 4, 0):
2598 case IP_VERSION(9, 4, 1):
2599 case IP_VERSION(9, 4, 2):
2600 case IP_VERSION(9, 4, 3):
2601 case IP_VERSION(9, 4, 4):
2602 case IP_VERSION(9, 5, 0):
2603 case IP_VERSION(10, 3, 0):
2604 case IP_VERSION(11, 0, 0):
2605 case IP_VERSION(11, 0, 1):
2606 case IP_VERSION(11, 0, 2):
2607 case IP_VERSION(11, 0, 3):
2608 case IP_VERSION(12, 0, 0):
2609 case IP_VERSION(12, 0, 1):
2610 *states = ATTR_STATE_SUPPORTED;
2611 break;
2612 default:
2613 *states = ATTR_STATE_UNSUPPORTED;
2614 }
2615 } else if (DEVICE_ATTR_IS(pp_features)) {
2616 if ((adev->flags & AMD_IS_APU &&
2617 gc_ver != IP_VERSION(9, 4, 3)) ||
2618 gc_ver < IP_VERSION(9, 0, 0))
2619 *states = ATTR_STATE_UNSUPPORTED;
2620 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2621 if (gc_ver < IP_VERSION(9, 1, 0))
2622 *states = ATTR_STATE_UNSUPPORTED;
2623 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2624 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2625 *states = ATTR_STATE_UNSUPPORTED;
2626 else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2627 gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2628 *states = ATTR_STATE_UNSUPPORTED;
2629 } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2630 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2631 *states = ATTR_STATE_UNSUPPORTED;
2632 } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2633 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2634 *states = ATTR_STATE_UNSUPPORTED;
2635 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2636 u32 limit;
2637
2638 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2639 -EOPNOTSUPP)
2640 *states = ATTR_STATE_UNSUPPORTED;
2641 }
2642
2643 switch (gc_ver) {
2644 case IP_VERSION(10, 3, 0):
2645 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2646 amdgpu_sriov_vf(adev)) {
2647 dev_attr->attr.mode &= ~0222;
2648 dev_attr->store = NULL;
2649 }
2650 break;
2651 default:
2652 break;
2653 }
2654
2655 return 0;
2656 }
2657
2658
2659 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2660 struct amdgpu_device_attr *attr,
2661 uint32_t mask, struct list_head *attr_list)
2662 {
2663 int ret = 0;
2664 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2665 struct amdgpu_device_attr_entry *attr_entry;
2666 struct device_attribute *dev_attr;
2667 const char *name;
2668
2669 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2670 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2671
2672 if (!attr)
2673 return -EINVAL;
2674
2675 dev_attr = &attr->dev_attr;
2676 name = dev_attr->attr.name;
2677
2678 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2679
2680 ret = attr_update(adev, attr, mask, &attr_states);
2681 if (ret) {
2682 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2683 name, ret);
2684 return ret;
2685 }
2686
2687 if (attr_states == ATTR_STATE_UNSUPPORTED)
2688 return 0;
2689
2690 ret = device_create_file(adev->dev, dev_attr);
2691 if (ret) {
2692 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2693 name, ret);
2694 }
2695
2696 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2697 if (!attr_entry)
2698 return -ENOMEM;
2699
2700 attr_entry->attr = attr;
2701 INIT_LIST_HEAD(&attr_entry->entry);
2702
2703 list_add_tail(&attr_entry->entry, attr_list);
2704
2705 return ret;
2706 }
2707
2708 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2709 {
2710 struct device_attribute *dev_attr = &attr->dev_attr;
2711
2712 device_remove_file(adev->dev, dev_attr);
2713 }
2714
2715 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2716 struct list_head *attr_list);
2717
2718 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2719 struct amdgpu_device_attr *attrs,
2720 uint32_t counts,
2721 uint32_t mask,
2722 struct list_head *attr_list)
2723 {
2724 int ret = 0;
2725 uint32_t i = 0;
2726
2727 for (i = 0; i < counts; i++) {
2728 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2729 if (ret)
2730 goto failed;
2731 }
2732
2733 return 0;
2734
2735 failed:
2736 amdgpu_device_attr_remove_groups(adev, attr_list);
2737
2738 return ret;
2739 }
2740
2741 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2742 struct list_head *attr_list)
2743 {
2744 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2745
2746 if (list_empty(attr_list))
2747 return;
2748
2749 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2750 amdgpu_device_attr_remove(adev, entry->attr);
2751 list_del(&entry->entry);
2752 kfree(entry);
2753 }
2754 }
2755
2756 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2757 struct device_attribute *attr,
2758 char *buf)
2759 {
2760 struct amdgpu_device *adev = dev_get_drvdata(dev);
2761 int channel = to_sensor_dev_attr(attr)->index;
2762 int r, temp = 0;
2763
2764 if (channel >= PP_TEMP_MAX)
2765 return -EINVAL;
2766
2767 switch (channel) {
2768 case PP_TEMP_JUNCTION:
2769 /* get current junction temperature */
2770 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2771 (void *)&temp);
2772 break;
2773 case PP_TEMP_EDGE:
2774 /* get current edge temperature */
2775 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2776 (void *)&temp);
2777 break;
2778 case PP_TEMP_MEM:
2779 /* get current memory temperature */
2780 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2781 (void *)&temp);
2782 break;
2783 default:
2784 r = -EINVAL;
2785 break;
2786 }
2787
2788 if (r)
2789 return r;
2790
2791 return sysfs_emit(buf, "%d\n", temp);
2792 }
2793
2794 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2795 struct device_attribute *attr,
2796 char *buf)
2797 {
2798 struct amdgpu_device *adev = dev_get_drvdata(dev);
2799 int hyst = to_sensor_dev_attr(attr)->index;
2800 int temp;
2801
2802 if (hyst)
2803 temp = adev->pm.dpm.thermal.min_temp;
2804 else
2805 temp = adev->pm.dpm.thermal.max_temp;
2806
2807 return sysfs_emit(buf, "%d\n", temp);
2808 }
2809
2810 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2811 struct device_attribute *attr,
2812 char *buf)
2813 {
2814 struct amdgpu_device *adev = dev_get_drvdata(dev);
2815 int hyst = to_sensor_dev_attr(attr)->index;
2816 int temp;
2817
2818 if (hyst)
2819 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2820 else
2821 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2822
2823 return sysfs_emit(buf, "%d\n", temp);
2824 }
2825
2826 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2827 struct device_attribute *attr,
2828 char *buf)
2829 {
2830 struct amdgpu_device *adev = dev_get_drvdata(dev);
2831 int hyst = to_sensor_dev_attr(attr)->index;
2832 int temp;
2833
2834 if (hyst)
2835 temp = adev->pm.dpm.thermal.min_mem_temp;
2836 else
2837 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2838
2839 return sysfs_emit(buf, "%d\n", temp);
2840 }
2841
2842 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2843 struct device_attribute *attr,
2844 char *buf)
2845 {
2846 int channel = to_sensor_dev_attr(attr)->index;
2847
2848 if (channel >= PP_TEMP_MAX)
2849 return -EINVAL;
2850
2851 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2852 }
2853
2854 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2855 struct device_attribute *attr,
2856 char *buf)
2857 {
2858 struct amdgpu_device *adev = dev_get_drvdata(dev);
2859 int channel = to_sensor_dev_attr(attr)->index;
2860 int temp = 0;
2861
2862 if (channel >= PP_TEMP_MAX)
2863 return -EINVAL;
2864
2865 switch (channel) {
2866 case PP_TEMP_JUNCTION:
2867 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2868 break;
2869 case PP_TEMP_EDGE:
2870 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2871 break;
2872 case PP_TEMP_MEM:
2873 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2874 break;
2875 }
2876
2877 return sysfs_emit(buf, "%d\n", temp);
2878 }
2879
2880 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2881 struct device_attribute *attr,
2882 char *buf)
2883 {
2884 struct amdgpu_device *adev = dev_get_drvdata(dev);
2885 u32 pwm_mode = 0;
2886 int ret;
2887
2888 ret = amdgpu_pm_get_access_if_active(adev);
2889 if (ret)
2890 return ret;
2891
2892 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2893
2894 amdgpu_pm_put_access(adev);
2895
2896 if (ret)
2897 return -EINVAL;
2898
2899 return sysfs_emit(buf, "%u\n", pwm_mode);
2900 }
2901
2902 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2903 struct device_attribute *attr,
2904 const char *buf,
2905 size_t count)
2906 {
2907 struct amdgpu_device *adev = dev_get_drvdata(dev);
2908 int err, ret;
2909 u32 pwm_mode;
2910 int value;
2911
2912 err = kstrtoint(buf, 10, &value);
2913 if (err)
2914 return err;
2915
2916 if (value == 0)
2917 pwm_mode = AMD_FAN_CTRL_NONE;
2918 else if (value == 1)
2919 pwm_mode = AMD_FAN_CTRL_MANUAL;
2920 else if (value == 2)
2921 pwm_mode = AMD_FAN_CTRL_AUTO;
2922 else
2923 return -EINVAL;
2924
2925 ret = amdgpu_pm_get_access(adev);
2926 if (ret < 0)
2927 return ret;
2928
2929 ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2930
2931 amdgpu_pm_put_access(adev);
2932
2933 if (ret)
2934 return -EINVAL;
2935
2936 return count;
2937 }
2938
2939 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2940 struct device_attribute *attr,
2941 char *buf)
2942 {
2943 return sysfs_emit(buf, "%i\n", 0);
2944 }
2945
2946 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2947 struct device_attribute *attr,
2948 char *buf)
2949 {
2950 return sysfs_emit(buf, "%i\n", 255);
2951 }
2952
2953 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2954 struct device_attribute *attr,
2955 const char *buf, size_t count)
2956 {
2957 struct amdgpu_device *adev = dev_get_drvdata(dev);
2958 int err;
2959 u32 value;
2960 u32 pwm_mode;
2961
2962 err = kstrtou32(buf, 10, &value);
2963 if (err)
2964 return err;
2965
2966 err = amdgpu_pm_get_access(adev);
2967 if (err < 0)
2968 return err;
2969
2970 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2971 if (err)
2972 goto out;
2973
2974 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2975 pr_info("manual fan speed control should be enabled first\n");
2976 err = -EINVAL;
2977 goto out;
2978 }
2979
2980 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2981
2982 out:
2983 amdgpu_pm_put_access(adev);
2984
2985 if (err)
2986 return err;
2987
2988 return count;
2989 }
2990
2991 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2992 struct device_attribute *attr,
2993 char *buf)
2994 {
2995 struct amdgpu_device *adev = dev_get_drvdata(dev);
2996 int err;
2997 u32 speed = 0;
2998
2999 err = amdgpu_pm_get_access_if_active(adev);
3000 if (err)
3001 return err;
3002
3003 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
3004
3005 amdgpu_pm_put_access(adev);
3006
3007 if (err)
3008 return err;
3009
3010 return sysfs_emit(buf, "%i\n", speed);
3011 }
3012
3013 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
3014 struct device_attribute *attr,
3015 char *buf)
3016 {
3017 struct amdgpu_device *adev = dev_get_drvdata(dev);
3018 int err;
3019 u32 speed = 0;
3020
3021 err = amdgpu_pm_get_access_if_active(adev);
3022 if (err)
3023 return err;
3024
3025 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
3026
3027 amdgpu_pm_put_access(adev);
3028
3029 if (err)
3030 return err;
3031
3032 return sysfs_emit(buf, "%i\n", speed);
3033 }
3034
3035 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
3036 struct device_attribute *attr,
3037 char *buf)
3038 {
3039 struct amdgpu_device *adev = dev_get_drvdata(dev);
3040 u32 min_rpm = 0;
3041 int r;
3042
3043 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
3044 (void *)&min_rpm);
3045
3046 if (r)
3047 return r;
3048
3049 return sysfs_emit(buf, "%d\n", min_rpm);
3050 }
3051
3052 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
3053 struct device_attribute *attr,
3054 char *buf)
3055 {
3056 struct amdgpu_device *adev = dev_get_drvdata(dev);
3057 u32 max_rpm = 0;
3058 int r;
3059
3060 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
3061 (void *)&max_rpm);
3062
3063 if (r)
3064 return r;
3065
3066 return sysfs_emit(buf, "%d\n", max_rpm);
3067 }
3068
3069 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
3070 struct device_attribute *attr,
3071 char *buf)
3072 {
3073 struct amdgpu_device *adev = dev_get_drvdata(dev);
3074 int err;
3075 u32 rpm = 0;
3076
3077 err = amdgpu_pm_get_access_if_active(adev);
3078 if (err)
3079 return err;
3080
3081 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
3082
3083 amdgpu_pm_put_access(adev);
3084
3085 if (err)
3086 return err;
3087
3088 return sysfs_emit(buf, "%i\n", rpm);
3089 }
3090
3091 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
3092 struct device_attribute *attr,
3093 const char *buf, size_t count)
3094 {
3095 struct amdgpu_device *adev = dev_get_drvdata(dev);
3096 int err;
3097 u32 value;
3098 u32 pwm_mode;
3099
3100 err = kstrtou32(buf, 10, &value);
3101 if (err)
3102 return err;
3103
3104 err = amdgpu_pm_get_access(adev);
3105 if (err < 0)
3106 return err;
3107
3108 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3109 if (err)
3110 goto out;
3111
3112 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3113 err = -ENODATA;
3114 goto out;
3115 }
3116
3117 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
3118
3119 out:
3120 amdgpu_pm_put_access(adev);
3121
3122 if (err)
3123 return err;
3124
3125 return count;
3126 }
3127
3128 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
3129 struct device_attribute *attr,
3130 char *buf)
3131 {
3132 struct amdgpu_device *adev = dev_get_drvdata(dev);
3133 u32 pwm_mode = 0;
3134 int ret;
3135
3136 ret = amdgpu_pm_get_access_if_active(adev);
3137 if (ret)
3138 return ret;
3139
3140 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3141
3142 amdgpu_pm_put_access(adev);
3143
3144 if (ret)
3145 return -EINVAL;
3146
3147 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3148 }
3149
3150 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3151 struct device_attribute *attr,
3152 const char *buf,
3153 size_t count)
3154 {
3155 struct amdgpu_device *adev = dev_get_drvdata(dev);
3156 int err;
3157 int value;
3158 u32 pwm_mode;
3159
3160 err = kstrtoint(buf, 10, &value);
3161 if (err)
3162 return err;
3163
3164 if (value == 0)
3165 pwm_mode = AMD_FAN_CTRL_AUTO;
3166 else if (value == 1)
3167 pwm_mode = AMD_FAN_CTRL_MANUAL;
3168 else
3169 return -EINVAL;
3170
3171 err = amdgpu_pm_get_access(adev);
3172 if (err < 0)
3173 return err;
3174
3175 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3176
3177 amdgpu_pm_put_access(adev);
3178
3179 if (err)
3180 return -EINVAL;
3181
3182 return count;
3183 }
3184
3185 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3186 struct device_attribute *attr,
3187 char *buf)
3188 {
3189 struct amdgpu_device *adev = dev_get_drvdata(dev);
3190 u32 vddgfx;
3191 int r;
3192
3193 /* get the voltage */
3194 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3195 (void *)&vddgfx);
3196 if (r)
3197 return r;
3198
3199 return sysfs_emit(buf, "%d\n", vddgfx);
3200 }
3201
3202 static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
3203 struct device_attribute *attr,
3204 char *buf)
3205 {
3206 struct amdgpu_device *adev = dev_get_drvdata(dev);
3207 u32 vddboard;
3208 int r;
3209
3210 /* get the voltage */
3211 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3212 (void *)&vddboard);
3213 if (r)
3214 return r;
3215
3216 return sysfs_emit(buf, "%d\n", vddboard);
3217 }
3218
3219 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
3220 struct device_attribute *attr,
3221 char *buf)
3222 {
3223 return sysfs_emit(buf, "vddgfx\n");
3224 }
3225
3226 static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
3227 struct device_attribute *attr,
3228 char *buf)
3229 {
3230 return sysfs_emit(buf, "vddboard\n");
3231 }
3232 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3233 struct device_attribute *attr,
3234 char *buf)
3235 {
3236 struct amdgpu_device *adev = dev_get_drvdata(dev);
3237 u32 vddnb;
3238 int r;
3239
3240 /* only APUs have vddnb */
3241 if (!(adev->flags & AMD_IS_APU))
3242 return -EINVAL;
3243
3244 /* get the voltage */
3245 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3246 (void *)&vddnb);
3247 if (r)
3248 return r;
3249
3250 return sysfs_emit(buf, "%d\n", vddnb);
3251 }
3252
3253 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3254 struct device_attribute *attr,
3255 char *buf)
3256 {
3257 return sysfs_emit(buf, "vddnb\n");
3258 }
3259
3260 static int amdgpu_hwmon_get_power(struct device *dev,
3261 enum amd_pp_sensors sensor)
3262 {
3263 struct amdgpu_device *adev = dev_get_drvdata(dev);
3264 unsigned int uw;
3265 u32 query = 0;
3266 int r;
3267
3268 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
3269 if (r)
3270 return r;
3271
3272 /* convert to microwatts */
3273 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3274
3275 return uw;
3276 }
3277
3278 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3279 struct device_attribute *attr,
3280 char *buf)
3281 {
3282 ssize_t val;
3283
3284 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3285 if (val < 0)
3286 return val;
3287
3288 return sysfs_emit(buf, "%zd\n", val);
3289 }
3290
3291 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3292 struct device_attribute *attr,
3293 char *buf)
3294 {
3295 ssize_t val;
3296
3297 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3298 if (val < 0)
3299 return val;
3300
3301 return sysfs_emit(buf, "%zd\n", val);
3302 }
3303
3304 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3305 struct device_attribute *attr,
3306 char *buf,
3307 enum pp_power_limit_level pp_limit_level)
3308 {
3309 struct amdgpu_device *adev = dev_get_drvdata(dev);
3310 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3311 uint32_t limit;
3312 ssize_t size;
3313 int r;
3314
3315 r = amdgpu_pm_get_access_if_active(adev);
3316 if (r)
3317 return r;
3318
3319 r = amdgpu_dpm_get_power_limit(adev, &limit,
3320 pp_limit_level, power_type);
3321
3322 if (!r)
3323 size = sysfs_emit(buf, "%u\n", limit * 1000000);
3324 else
3325 size = sysfs_emit(buf, "\n");
3326
3327 amdgpu_pm_put_access(adev);
3328
3329 return size;
3330 }
3331
3332 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3333 struct device_attribute *attr,
3334 char *buf)
3335 {
3336 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3337 }
3338
3339 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3340 struct device_attribute *attr,
3341 char *buf)
3342 {
3343 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3344
3345 }
3346
3347 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3348 struct device_attribute *attr,
3349 char *buf)
3350 {
3351 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3352
3353 }
3354
3355 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3356 struct device_attribute *attr,
3357 char *buf)
3358 {
3359 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3360
3361 }
3362
3363 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3364 struct device_attribute *attr,
3365 char *buf)
3366 {
3367 struct amdgpu_device *adev = dev_get_drvdata(dev);
3368 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3369
3370 if (gc_ver == IP_VERSION(10, 3, 1))
3371 return sysfs_emit(buf, "%s\n",
3372 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3373 "fastPPT" : "slowPPT");
3374 else
3375 return sysfs_emit(buf, "PPT\n");
3376 }
3377
3378 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3379 struct device_attribute *attr,
3380 const char *buf,
3381 size_t count)
3382 {
3383 struct amdgpu_device *adev = dev_get_drvdata(dev);
3384 int limit_type = to_sensor_dev_attr(attr)->index;
3385 int err;
3386 u32 value;
3387
3388 err = kstrtou32(buf, 10, &value);
3389 if (err)
3390 return err;
3391
3392 value = value / 1000000; /* convert to Watt */
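/* the power-limit type is carried in bits 31:24 of the value handed down to the dpm layer */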
3393 value |= limit_type << 24;
3394
3395 err = amdgpu_pm_get_access(adev);
3396 if (err < 0)
3397 return err;
3398
3399 err = amdgpu_dpm_set_power_limit(adev, value);
3400
3401 amdgpu_pm_put_access(adev);
3402
3403 if (err)
3404 return err;
3405
3406 return count;
3407 }
3408
3409 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3410 struct device_attribute *attr,
3411 char *buf)
3412 {
3413 struct amdgpu_device *adev = dev_get_drvdata(dev);
3414 uint32_t sclk;
3415 int r;
3416
3417 /* get the sclk (reported in 10 kHz units) */
3418 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3419 (void *)&sclk);
3420 if (r)
3421 return r;
3422
3423 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3424 }
3425
3426 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3427 struct device_attribute *attr,
3428 char *buf)
3429 {
3430 return sysfs_emit(buf, "sclk\n");
3431 }
3432
3433 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3434 struct device_attribute *attr,
3435 char *buf)
3436 {
3437 struct amdgpu_device *adev = dev_get_drvdata(dev);
3438 uint32_t mclk;
3439 int r;
3440
3441 /* get the mclk */
3442 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3443 (void *)&mclk);
3444 if (r)
3445 return r;
3446
3447 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3448 }
3449
3450 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3451 struct device_attribute *attr,
3452 char *buf)
3453 {
3454 return sysfs_emit(buf, "mclk\n");
3455 }
3456
3457 /**
3458 * DOC: hwmon
3459 *
3460 * The amdgpu driver exposes the following sensor interfaces:
3461 *
3462 * - GPU temperature (via the on-die sensor)
3463 *
3464 * - GPU voltage
3465 *
3466 * - Northbridge voltage (APUs only)
3467 *
3468 * - GPU power
3469 *
3470 * - GPU fan
3471 *
3472 * - GPU gfx/compute engine clock
3473 *
3474 * - GPU memory clock (dGPU only)
3475 *
3476 * hwmon interfaces for GPU temperature:
3477 *
3478 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3479 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3480 *
3481 * - temp[1-3]_label: temperature channel label
3482 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3483 *
3484 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3485 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3486 *
3487 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3488 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3489 *
3490 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3491 * - these are supported on SOC15 dGPUs only
3492 *
3493 * hwmon interfaces for GPU voltage:
3494 *
3495 * - in0_input: the voltage on the GPU in millivolts
3496 *
3497 * - in1_input: the voltage on the Northbridge in millivolts
3498 *
3499 * hwmon interfaces for GPU power:
3500 *
3501 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
3502 *
3503 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
3504 *
3505 * - power1_cap_min: minimum cap supported in microWatts
3506 *
3507 * - power1_cap_max: maximum cap supported in microWatts
3508 *
3509 * - power1_cap: selected power cap in microWatts
3510 *
3511 * hwmon interfaces for GPU fan:
3512 *
3513 * - pwm1: pulse width modulation fan level (0-255)
3514 *
3515 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3516 *
3517 * - pwm1_min: pulse width modulation fan control minimum level (0)
3518 *
3519 * - pwm1_max: pulse width modulation fan control maximum level (255)
3520 *
3521 * - fan1_min: the minimum fan speed, in revolutions/min (RPM)
3522 *
3523 * - fan1_max: the maximum fan speed, in revolutions/min (RPM)
3524 *
3525 * - fan1_input: fan speed in RPM
3526 *
3527 * - fan[1-\*]_target: the desired fan speed, in revolutions/min (RPM)
3528 *
3529 * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
3530 *
3531 * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3532 * Doing so will cause the former to be overridden.
3533 *
3534 * hwmon interfaces for GPU clocks:
3535 *
3536 * - freq1_input: the gfx/compute clock in hertz
3537 *
3538 * - freq2_input: the memory clock in hertz
3539 *
3540 * You can use hwmon tools like sensors to view this information on your system.
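*
* For example (an illustrative sketch only; the hwmon index varies by
* system), the edge temperature and the current power cap could be read
* with:
*
*   cat /sys/class/hwmon/hwmon0/temp1_input
*   cat /sys/class/hwmon/hwmon0/power1_cap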
3541 *
3542 */
3543
3544 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3545 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3546 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3547 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3548 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3549 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3550 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3551 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3552 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3553 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3554 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3555 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3556 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3557 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3558 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3559 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3560 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3561 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3562 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3563 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3564 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3565 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3566 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3567 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3568 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3569 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3570 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3571 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3572 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
3573 static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
3574 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3575 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3576 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3577 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3578 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3579 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3580 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3581 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3582 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3583 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3584 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3585 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3586 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3587 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3588 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3589 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3590 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3591
3592 static struct attribute *hwmon_attributes[] = {
3593 &sensor_dev_attr_temp1_input.dev_attr.attr,
3594 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3595 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3596 &sensor_dev_attr_temp2_input.dev_attr.attr,
3597 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3598 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3599 &sensor_dev_attr_temp3_input.dev_attr.attr,
3600 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3601 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3602 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3603 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3604 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3605 &sensor_dev_attr_temp1_label.dev_attr.attr,
3606 &sensor_dev_attr_temp2_label.dev_attr.attr,
3607 &sensor_dev_attr_temp3_label.dev_attr.attr,
3608 &sensor_dev_attr_pwm1.dev_attr.attr,
3609 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3610 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3611 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3612 &sensor_dev_attr_fan1_input.dev_attr.attr,
3613 &sensor_dev_attr_fan1_min.dev_attr.attr,
3614 &sensor_dev_attr_fan1_max.dev_attr.attr,
3615 &sensor_dev_attr_fan1_target.dev_attr.attr,
3616 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3617 &sensor_dev_attr_in0_input.dev_attr.attr,
3618 &sensor_dev_attr_in0_label.dev_attr.attr,
3619 &sensor_dev_attr_in1_input.dev_attr.attr,
3620 &sensor_dev_attr_in1_label.dev_attr.attr,
3621 &sensor_dev_attr_in2_input.dev_attr.attr,
3622 &sensor_dev_attr_in2_label.dev_attr.attr,
3623 &sensor_dev_attr_power1_average.dev_attr.attr,
3624 &sensor_dev_attr_power1_input.dev_attr.attr,
3625 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3626 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3627 &sensor_dev_attr_power1_cap.dev_attr.attr,
3628 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3629 &sensor_dev_attr_power1_label.dev_attr.attr,
3630 &sensor_dev_attr_power2_average.dev_attr.attr,
3631 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3632 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3633 &sensor_dev_attr_power2_cap.dev_attr.attr,
3634 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3635 &sensor_dev_attr_power2_label.dev_attr.attr,
3636 &sensor_dev_attr_freq1_input.dev_attr.attr,
3637 &sensor_dev_attr_freq1_label.dev_attr.attr,
3638 &sensor_dev_attr_freq2_input.dev_attr.attr,
3639 &sensor_dev_attr_freq2_label.dev_attr.attr,
3640 NULL
3641 };
3642
3643 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3644 struct attribute *attr, int index)
3645 {
3646 struct device *dev = kobj_to_dev(kobj);
3647 struct amdgpu_device *adev = dev_get_drvdata(dev);
3648 umode_t effective_mode = attr->mode;
3649 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3650 uint32_t tmp;
3651
3652 /* under pp one vf mode, management of hwmon attributes is not supported */
3653 if (amdgpu_sriov_is_pp_one_vf(adev))
3654 effective_mode &= ~S_IWUSR;
3655
3656 /* Skip fan attributes if fan is not present */
3657 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3658 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3659 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3660 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3661 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3662 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3663 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3664 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3665 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3666 return 0;
3667
3668 /* Skip fan attributes on APU */
3669 if ((adev->flags & AMD_IS_APU) &&
3670 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3671 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3672 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3673 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3674 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3675 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3676 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3677 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3678 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3679 return 0;
3680
3681 /* Skip crit temp on APU */
3682 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3683 (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
3684 gc_ver == IP_VERSION(9, 5, 0))) &&
3685 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3686 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3687 return 0;
3688
3689 /* Skip limit attributes if DPM is not enabled */
3690 if (!adev->pm.dpm_enabled &&
3691 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3692 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3693 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3694 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3695 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3696 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3697 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3698 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3699 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3700 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3701 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3702 return 0;
3703
3704 /* mask fan attributes if we have no bindings for this asic to expose */
3705 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3706 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3707 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3708 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3709 effective_mode &= ~S_IRUGO;
3710
3711 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3712 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3713 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3714 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3715 effective_mode &= ~S_IWUSR;
3716
3717 /* not implemented yet for APUs other than GC 10.3.1 (Vangogh) and 9.4.3 */
3718 if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3719 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3720 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3721 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
3722 if (adev->family == AMDGPU_FAMILY_SI ||
3723 ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
3724 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
3725 (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
3726 return 0;
3727 }
3728
3729 if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
3730 amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
3731 effective_mode |= S_IWUSR;
3732
3733 /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
3734 if (((adev->family == AMDGPU_FAMILY_SI) ||
3735 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3736 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3737 return 0;
3738
3739 /* not all products support both average and instantaneous */
3740 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3741 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
3742 (void *)&tmp) == -EOPNOTSUPP)
3743 return 0;
3744 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3745 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
3746 (void *)&tmp) == -EOPNOTSUPP)
3747 return 0;
3748
3749 /* hide max/min values if we can't both query and manage the fan */
3750 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3751 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3752 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3753 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3754 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3755 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3756 return 0;
3757
3758 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3759 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3760 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3761 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3762 return 0;
3763
3764 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3765 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
3766 (gc_ver == IP_VERSION(9, 4, 3) ||
3767 gc_ver == IP_VERSION(9, 4, 4) ||
3768 gc_ver == IP_VERSION(9, 5, 0))) &&
3769 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3770 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3771 return 0;
3772
3773 /* only APUs other than GC 9.4.3/9.4.4/9.5.0 have vddnb */
3774 if ((!(adev->flags & AMD_IS_APU) ||
3775 (gc_ver == IP_VERSION(9, 4, 3) ||
3776 gc_ver == IP_VERSION(9, 4, 4) ||
3777 gc_ver == IP_VERSION(9, 5, 0))) &&
3778 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3779 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3780 return 0;
3781
3782 /* only few boards support vddboard */
3783 if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
3784 attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
3785 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3786 (void *)&tmp) == -EOPNOTSUPP)
3787 return 0;
3788
3789 /* no mclk on APUs other than GC 9.4.3 */
3790 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3791 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3792 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3793 return 0;
3794
3795 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3796 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
3797 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3798 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3799 attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3800 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3801 attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3802 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3803 return 0;
3804
3805 /* hotspot temperature for GC 9.4.3/9.4.4/9.5.0 */
3806 if (gc_ver == IP_VERSION(9, 4, 3) ||
3807 gc_ver == IP_VERSION(9, 4, 4) ||
3808 gc_ver == IP_VERSION(9, 5, 0)) {
3809 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3810 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3811 attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3812 return 0;
3813
3814 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3815 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3816 return attr->mode;
3817 }
3818
3819 /* only SOC15 dGPUs support hotspot and mem temperatures */
3820 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3821 (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3822 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3823 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3824 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3825 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3826 return 0;
3827
3828 /* only Vangogh has fast PPT limit and power labels */
3829 if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3830 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3831 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3832 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3833 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3834 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3835 attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3836 return 0;
3837
3838 return effective_mode;
3839 }
3840
3841 static const struct attribute_group hwmon_attrgroup = {
3842 .attrs = hwmon_attributes,
3843 .is_visible = hwmon_attributes_visible,
3844 };
3845
3846 static const struct attribute_group *hwmon_groups[] = {
3847 &hwmon_attrgroup,
3848 NULL
3849 };
3850
3851 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3852 enum pp_clock_type od_type,
3853 char *buf)
3854 {
3855 int size = 0;
3856 int ret;
3857
3858 ret = amdgpu_pm_get_access_if_active(adev);
3859 if (ret)
3860 return ret;
3861
3862 size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3863 if (size == 0)
3864 size = sysfs_emit(buf, "\n");
3865
3866 amdgpu_pm_put_access(adev);
3867
3868 return size;
3869 }
3870
3871 static int parse_input_od_command_lines(const char *buf,
3872 size_t count,
3873 u32 *type,
3874 long *params,
3875 uint32_t *num_of_params)
3876 {
3877 const char delimiter[3] = {' ', '\n', '\0'};
3878 uint32_t parameter_size = 0;
3879 char buf_cpy[128] = {0};
3880 char *tmp_str, *sub_str;
3881 int ret;
3882
3883 if (count > sizeof(buf_cpy) - 1)
3884 return -EINVAL;
3885
3886 memcpy(buf_cpy, buf, count);
3887 tmp_str = buf_cpy;
3888
3889 /* skip leading spaces */
3890 while (isspace(*tmp_str))
3891 tmp_str++;
3892
3893 switch (*tmp_str) {
3894 case 'c':
3895 *type = PP_OD_COMMIT_DPM_TABLE;
3896 return 0;
3897 case 'r':
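/*
 * For a restore request, the current command type is passed on as
 * the parameter, so the backend knows which table to restore to its
 * defaults.
 */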
3898 params[parameter_size] = *type;
3899 *num_of_params = 1;
3900 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3901 return 0;
3902 default:
3903 break;
3904 }
3905
3906 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3907 if (strlen(sub_str) == 0)
3908 continue;
3909
3910 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3911 if (ret)
3912 return -EINVAL;
3913 parameter_size++;
3914
3915 if (!tmp_str)
3916 break;
3917
3918 while (isspace(*tmp_str))
3919 tmp_str++;
3920 }
3921
3922 *num_of_params = parameter_size;
3923
3924 return 0;
3925 }
3926
3927 static int
3928 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3929 enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3930 const char *in_buf,
3931 size_t count)
3932 {
3933 uint32_t parameter_size = 0;
3934 long parameter[64];
3935 int ret;
3936
3937 ret = parse_input_od_command_lines(in_buf,
3938 count,
3939 &cmd_type,
3940 parameter,
3941 &parameter_size);
3942 if (ret)
3943 return ret;
3944
3945 ret = amdgpu_pm_get_access(adev);
3946 if (ret < 0)
3947 return ret;
3948
3949 ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3950 cmd_type,
3951 parameter,
3952 parameter_size);
3953 if (ret)
3954 goto err_out;
3955
3956 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
3957 ret = amdgpu_dpm_dispatch_task(adev,
3958 AMD_PP_TASK_READJUST_POWER_STATE,
3959 NULL);
3960 if (ret)
3961 goto err_out;
3962 }
3963
3964 amdgpu_pm_put_access(adev);
3965
3966 return count;
3967
3968 err_out:
3969 amdgpu_pm_put_access(adev);
3970
3971 return ret;
3972 }
3973
3974 /**
3975 * DOC: fan_curve
3976 *
3977 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3978 * control curve line.
3979 *
3980 * Reading back the file shows you the current settings (temperature in degrees
3981 * Celsius and fan speed in PWM) applied to every anchor point of the curve line
3982 * and their permitted ranges if changeable.
3983 *
3984 * Writing a string of the form "anchor_point_index temperature
3985 * fan_speed_in_pwm" to the file changes the settings for the specific anchor
3986 * point accordingly.
3987 *
3988 * When you have finished the editing, write "c" (commit) to the file to commit
3989 * your changes.
3990 *
3991 * If you want to reset to the default value, write "r" (reset) to the file to
3992 * reset them.
3993 *
3994 * Two fan control modes are supported: auto and manual. In auto mode, the
3995 * PMFW handles the fan speed control (how the fan speed reacts to the ASIC
3996 * temperature). In manual mode, users can set their own fan curve line as
3997 * described here. Normally the ASIC boots up in auto mode. Any
3998 * setting via this interface will switch the fan control to manual mode
3999 * implicitly.
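*
* As an illustrative sketch only (the card index and the values vary by
* system), a typical edit sequence might be:
*
*   cat /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_curve
*   echo "2 60 153" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_curve
*   echo "c" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/fan_curve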
4000 */
4001 static ssize_t fan_curve_show(struct kobject *kobj,
4002 struct kobj_attribute *attr,
4003 char *buf)
4004 {
4005 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4006 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4007
4008 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
4009 }
4010
4011 static ssize_t fan_curve_store(struct kobject *kobj,
4012 struct kobj_attribute *attr,
4013 const char *buf,
4014 size_t count)
4015 {
4016 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4017 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4018
4019 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4020 PP_OD_EDIT_FAN_CURVE,
4021 buf,
4022 count);
4023 }
4024
4025 static umode_t fan_curve_visible(struct amdgpu_device *adev)
4026 {
4027 umode_t umode = 0000;
4028
4029 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
4030 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4031
4032 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
4033 umode |= S_IWUSR;
4034
4035 return umode;
4036 }
4037
4038 /**
4039 * DOC: acoustic_limit_rpm_threshold
4040 *
4041 * The amdgpu driver provides a sysfs API for checking and adjusting the
4042 * acoustic limit in RPM for fan control.
4043 *
4044 * Reading back the file shows you the current setting and the permitted
4045 * ranges if changeable.
4046 *
4047 * Writing an integer to the file changes the setting accordingly.
4048 *
4049 * When you have finished the editing, write "c" (commit) to the file to commit
4050 * your changes.
4051 *
4052 * If you want to reset to the default value, write "r" (reset) to the file to
4053 * reset them.
4054 *
4055 * This setting works under auto fan control mode only. It adjusts the PMFW's
4056 * behavior regarding the maximum speed in RPM the fan can spin. Setting via this
4057 * interface will switch the fan control to auto mode implicitly.
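*
* As an illustrative sketch (the path and the value are examples only),
* the same read/modify/commit pattern applies here and to the other
* fan_ctrl files:
*
*   echo "3000" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/acoustic_limit_rpm_threshold
*   echo "c" > /sys/class/drm/card0/device/gpu_od/fan_ctrl/acoustic_limit_rpm_threshold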
4058 */
4059 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
4060 struct kobj_attribute *attr,
4061 char *buf)
4062 {
4063 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4064 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4065
4066 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
4067 }
4068
4069 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
4070 struct kobj_attribute *attr,
4071 const char *buf,
4072 size_t count)
4073 {
4074 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4075 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4076
4077 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4078 PP_OD_EDIT_ACOUSTIC_LIMIT,
4079 buf,
4080 count);
4081 }
4082
4083 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
4084 {
4085 umode_t umode = 0000;
4086
4087 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
4088 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4089
4090 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
4091 umode |= S_IWUSR;
4092
4093 return umode;
4094 }
4095
4096 /**
4097 * DOC: acoustic_target_rpm_threshold
4098 *
4099 * The amdgpu driver provides a sysfs API for checking and adjusting the
4100 * acoustic target in RPM for fan control.
4101 *
4102 * Reading back the file shows you the current setting and the permitted
4103 * ranges if changeable.
4104 *
4105 * Writing an integer to the file changes the setting accordingly.
4106 *
4107 * When you have finished the editing, write "c" (commit) to the file to commit
4108 * your changes.
4109 *
4110 * If you want to reset to the default value, write "r" (reset) to the file to
4111 * reset them.
4112 *
4113 * This setting works under auto fan control mode only. It can co-exist with
4114 * other settings that also work under auto mode. It adjusts the PMFW's
4115 * behavior regarding the maximum speed in RPM the fan can spin when the ASIC
4116 * temperature is not greater than the target temperature. Setting via this
4117 * interface will switch the fan control to auto mode implicitly.
4118 */
4119 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
4120 struct kobj_attribute *attr,
4121 char *buf)
4122 {
4123 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4124 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4125
4126 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
4127 }
4128
4129 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
4130 struct kobj_attribute *attr,
4131 const char *buf,
4132 size_t count)
4133 {
4134 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4135 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4136
4137 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4138 PP_OD_EDIT_ACOUSTIC_TARGET,
4139 buf,
4140 count);
4141 }
4142
4143 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
4144 {
4145 umode_t umode = 0000;
4146
4147 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
4148 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4149
4150 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
4151 umode |= S_IWUSR;
4152
4153 return umode;
4154 }
4155
4156 /**
4157 * DOC: fan_target_temperature
4158 *
4159 * The amdgpu driver provides a sysfs API for checking and adjusting the
4160 * target temperature in degrees Celsius for fan control.
4161 *
4162 * Reading back the file shows you the current setting and the permitted
4163 * ranges if changeable.
4164 *
4165 * Writing an integer to the file changes the setting accordingly.
4166 *
4167 * When you have finished the editing, write "c" (commit) to the file to commit
4168 * your changes.
4169 *
4170 * If you want to reset to the default value, write "r" (reset) to the file to
4171 * reset them.
4172 *
4173 * This setting works under auto fan control mode only. It can co-exist with
4174 * other settings that also work under auto mode. Pairing with the
4175 * acoustic_target_rpm_threshold setting, they define the maximum speed in
4176 * RPM the fan can spin when the ASIC temperature is not greater than the target
4177 * temperature. Setting via this interface will switch the fan control to
4178 * auto mode implicitly.
4179 */
4180 static ssize_t fan_target_temperature_show(struct kobject *kobj,
4181 struct kobj_attribute *attr,
4182 char *buf)
4183 {
4184 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4185 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4186
4187 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4188 }
4189
4190 static ssize_t fan_target_temperature_store(struct kobject *kobj,
4191 struct kobj_attribute *attr,
4192 const char *buf,
4193 size_t count)
4194 {
4195 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4196 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4197
4198 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4199 PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4200 buf,
4201 count);
4202 }
4203
4204 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4205 {
4206 umode_t umode = 0000;
4207
4208 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4209 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4210
4211 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4212 umode |= S_IWUSR;
4213
4214 return umode;
4215 }
4216
4217 /**
4218 * DOC: fan_minimum_pwm
4219 *
4220 * The amdgpu driver provides a sysfs API for checking and adjusting the
4221 * minimum fan speed in PWM.
4222 *
4223 * Reading back the file shows you the current setting and the permitted
4224 * ranges if changeable.
4225 *
4226 * Writing an integer to the file changes the setting accordingly.
4227 *
4228 * When you have finished the editing, write "c" (commit) to the file to commit
4229 * your changes.
4230 *
4231 * If you want to reset to the default value, write "r" (reset) to the file to
4232 * reset them.
4233 *
4234 * This setting works under auto fan control mode only. It can co-exist with
4235 * other settings that also work under auto mode. It adjusts the PMFW's
4236 * behavior regarding the minimum fan speed in PWM the fan should spin. Setting
4237 * via this interface will switch the fan control to auto mode implicitly.
4238 */
4239 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4240 struct kobj_attribute *attr,
4241 char *buf)
4242 {
4243 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4244 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4245
4246 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4247 }
4248
4249 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4250 struct kobj_attribute *attr,
4251 const char *buf,
4252 size_t count)
4253 {
4254 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4255 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4256
4257 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4258 PP_OD_EDIT_FAN_MINIMUM_PWM,
4259 buf,
4260 count);
4261 }
4262
4263 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4264 {
4265 umode_t umode = 0000;
4266
4267 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4268 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4269
4270 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4271 umode |= S_IWUSR;
4272
4273 return umode;
4274 }
4275
4276 /**
4277 * DOC: fan_zero_rpm_enable
4278 *
4279 * The amdgpu driver provides a sysfs API for checking and adjusting the
4280 * zero RPM feature.
4281 *
4282 * Reading back the file shows you the current setting and the permitted
4283 * ranges if changeable.
4284 *
4285 * Writing an integer to the file changes the setting accordingly.
4286 *
4287 * When you have finished the editing, write "c" (commit) to the file to commit
4288 * your changes.
4289 *
4290 * If you want to reset to the default value, write "r" (reset) to the file to
4291 * reset them.
4292 */
4293 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4294 struct kobj_attribute *attr,
4295 char *buf)
4296 {
4297 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4298 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4299
4300 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4301 }
4302
4303 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4304 struct kobj_attribute *attr,
4305 const char *buf,
4306 size_t count)
4307 {
4308 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4309 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4310
4311 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4312 PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4313 buf,
4314 count);
4315 }
4316
4317 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4318 {
4319 umode_t umode = 0000;
4320
4321 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4322 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4323
4324 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4325 umode |= S_IWUSR;
4326
4327 return umode;
4328 }
4329
4330 /**
4331 * DOC: fan_zero_rpm_stop_temperature
4332 *
4333 * The amdgpu driver provides a sysfs API for checking and adjusting the
4334 * zero RPM stop temperature feature.
4335 *
4336 * Reading back the file shows you the current setting and the permitted
4337 * ranges if changeable.
4338 *
4339 * Writing an integer to the file changes the setting accordingly.
4340 *
4341 * When you have finished the editing, write "c" (commit) to the file to commit
4342 * your changes.
4343 *
4344 * If you want to reset to the default value, write "r" (reset) to the file to
4345 * reset them.
4346 *
4347 * This setting works only if the Zero RPM setting is enabled. It adjusts the
4348 * temperature below which the fan can stop.
4349 */
4350 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4351 struct kobj_attribute *attr,
4352 char *buf)
4353 {
4354 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4355 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4356
4357 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4358 }
4359
4360 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4361 struct kobj_attribute *attr,
4362 const char *buf,
4363 size_t count)
4364 {
4365 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4366 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4367
4368 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4369 PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4370 buf,
4371 count);
4372 }
4373
4374 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4375 {
4376 umode_t umode = 0000;
4377
4378 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4379 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4380
4381 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4382 umode |= S_IWUSR;
4383
4384 return umode;
4385 }
4386
4387 static struct od_feature_set amdgpu_od_set = {
4388 .containers = {
4389 [0] = {
4390 .name = "fan_ctrl",
4391 .sub_feature = {
4392 [0] = {
4393 .name = "fan_curve",
4394 .ops = {
4395 .is_visible = fan_curve_visible,
4396 .show = fan_curve_show,
4397 .store = fan_curve_store,
4398 },
4399 },
4400 [1] = {
4401 .name = "acoustic_limit_rpm_threshold",
4402 .ops = {
4403 .is_visible = acoustic_limit_threshold_visible,
4404 .show = acoustic_limit_threshold_show,
4405 .store = acoustic_limit_threshold_store,
4406 },
4407 },
4408 [2] = {
4409 .name = "acoustic_target_rpm_threshold",
4410 .ops = {
4411 .is_visible = acoustic_target_threshold_visible,
4412 .show = acoustic_target_threshold_show,
4413 .store = acoustic_target_threshold_store,
4414 },
4415 },
4416 [3] = {
4417 .name = "fan_target_temperature",
4418 .ops = {
4419 .is_visible = fan_target_temperature_visible,
4420 .show = fan_target_temperature_show,
4421 .store = fan_target_temperature_store,
4422 },
4423 },
4424 [4] = {
4425 .name = "fan_minimum_pwm",
4426 .ops = {
4427 .is_visible = fan_minimum_pwm_visible,
4428 .show = fan_minimum_pwm_show,
4429 .store = fan_minimum_pwm_store,
4430 },
4431 },
4432 [5] = {
4433 .name = "fan_zero_rpm_enable",
4434 .ops = {
4435 .is_visible = fan_zero_rpm_enable_visible,
4436 .show = fan_zero_rpm_enable_show,
4437 .store = fan_zero_rpm_enable_store,
4438 },
4439 },
4440 [6] = {
4441 .name = "fan_zero_rpm_stop_temperature",
4442 .ops = {
4443 .is_visible = fan_zero_rpm_stop_temp_visible,
4444 .show = fan_zero_rpm_stop_temp_show,
4445 .store = fan_zero_rpm_stop_temp_store,
4446 },
4447 },
4448 },
4449 },
4450 },
4451 };
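/*
 * With every feature above supported and visible, amdgpu_od_set_init()
 * below would create a sysfs layout along these lines (illustrative):
 *
 *   gpu_od/
 *   └── fan_ctrl/
 *       ├── fan_curve
 *       ├── acoustic_limit_rpm_threshold
 *       ├── acoustic_target_rpm_threshold
 *       ├── fan_target_temperature
 *       ├── fan_minimum_pwm
 *       ├── fan_zero_rpm_enable
 *       └── fan_zero_rpm_stop_temperature
 */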
4452
4453 static void od_kobj_release(struct kobject *kobj)
4454 {
4455 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4456
4457 kfree(od_kobj);
4458 }
4459
4460 static const struct kobj_type od_ktype = {
4461 .release = od_kobj_release,
4462 .sysfs_ops = &kobj_sysfs_ops,
4463 };
4464
4465 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4466 {
4467 struct od_kobj *container, *container_next;
4468 struct od_attribute *attribute, *attribute_next;
4469
4470 if (list_empty(&adev->pm.od_kobj_list))
4471 return;
4472
4473 list_for_each_entry_safe(container, container_next,
4474 &adev->pm.od_kobj_list, entry) {
4475 list_del(&container->entry);
4476
4477 list_for_each_entry_safe(attribute, attribute_next,
4478 &container->attribute, entry) {
4479 list_del(&attribute->entry);
4480 sysfs_remove_file(&container->kobj,
4481 &attribute->attribute.attr);
4482 kfree(attribute);
4483 }
4484
4485 kobject_put(&container->kobj);
4486 }
4487 }
4488
4489 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4490 struct od_feature_ops *feature_ops)
4491 {
4492 umode_t mode;
4493
4494 if (!feature_ops->is_visible)
4495 return false;
4496
4497 /*
4498 * If the feature has neither user read nor write mode set,
4499 * we can assume the feature is actually not supported, and
4500 * the relevant sysfs interface should not be exposed.
4501 */
4502 mode = feature_ops->is_visible(adev);
4503 if (mode & (S_IRUSR | S_IWUSR))
4504 return true;
4505
4506 return false;
4507 }
4508
4509 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4510 struct od_feature_container *container)
4511 {
4512 int i;
4513
4514 /*
4515 * If there is no valid entry within the container, the container
4516 * is recognized as a self-contained container. A valid entry
4517 * here means one that has a valid name and is visible/supported by
4518 * the ASIC.
4519 */
4520 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4521 if (container->sub_feature[i].name &&
4522 amdgpu_is_od_feature_supported(adev,
4523 &container->sub_feature[i].ops))
4524 return false;
4525 }
4526
4527 return true;
4528 }
4529
4530 static int amdgpu_od_set_init(struct amdgpu_device *adev)
4531 {
4532 struct od_kobj *top_set, *sub_set;
4533 struct od_attribute *attribute;
4534 struct od_feature_container *container;
4535 struct od_feature_item *feature;
4536 int i, j;
4537 int ret;
4538
4539 /* Setup the top `gpu_od` directory which holds all other OD interfaces */
4540 top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4541 if (!top_set)
4542 return -ENOMEM;
4543 list_add(&top_set->entry, &adev->pm.od_kobj_list);
4544
4545 ret = kobject_init_and_add(&top_set->kobj,
4546 &od_ktype,
4547 &adev->dev->kobj,
4548 "%s",
4549 "gpu_od");
4550 if (ret)
4551 goto err_out;
4552 INIT_LIST_HEAD(&top_set->attribute);
4553 top_set->priv = adev;
4554
4555 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4556 container = &amdgpu_od_set.containers[i];
4557
4558 if (!container->name)
4559 continue;
4560
4561 /*
4562 * If there are valid entries within the container, the container
4563 * will be presented as a sub directory and all the entries it holds
4564 * will be presented as plain files under it.
4565 * If there is no valid entry within the container, the container
4566 * itself will be presented as a plain file under the top `gpu_od` directory.
4567 */
4568 if (amdgpu_od_is_self_contained(adev, container)) {
4569 if (!amdgpu_is_od_feature_supported(adev,
4570 &container->ops))
4571 continue;
4572
4573 /*
4574 * The container is presented as a plain file under top `gpu_od`
4575 * directory.
4576 */
4577 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4578 if (!attribute) {
4579 ret = -ENOMEM;
4580 goto err_out;
4581 }
4582 list_add(&attribute->entry, &top_set->attribute);
4583
4584 attribute->attribute.attr.mode =
4585 container->ops.is_visible(adev);
4586 attribute->attribute.attr.name = container->name;
4587 attribute->attribute.show =
4588 container->ops.show;
4589 attribute->attribute.store =
4590 container->ops.store;
4591 ret = sysfs_create_file(&top_set->kobj,
4592 &attribute->attribute.attr);
4593 if (ret)
4594 goto err_out;
4595 } else {
4596 /* The container is presented as a sub directory. */
4597 sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4598 if (!sub_set) {
4599 ret = -ENOMEM;
4600 goto err_out;
4601 }
4602 list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4603
4604 ret = kobject_init_and_add(&sub_set->kobj,
4605 &od_ktype,
4606 &top_set->kobj,
4607 "%s",
4608 container->name);
4609 if (ret)
4610 goto err_out;
4611 INIT_LIST_HEAD(&sub_set->attribute);
4612 sub_set->priv = adev;
4613
4614 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4615 feature = &container->sub_feature[j];
4616 if (!feature->name)
4617 continue;
4618
4619 if (!amdgpu_is_od_feature_supported(adev,
4620 &feature->ops))
4621 continue;
4622
4623 /*
4624 * With the container presented as a sub directory, the entry within
4625 * it is presented as a plain file under the sub directory.
4626 */
4627 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4628 if (!attribute) {
4629 ret = -ENOMEM;
4630 goto err_out;
4631 }
4632 list_add(&attribute->entry, &sub_set->attribute);
4633
4634 attribute->attribute.attr.mode =
4635 feature->ops.is_visible(adev);
4636 attribute->attribute.attr.name = feature->name;
4637 attribute->attribute.show =
4638 feature->ops.show;
4639 attribute->attribute.store =
4640 feature->ops.store;
4641 ret = sysfs_create_file(&sub_set->kobj,
4642 &attribute->attribute.attr);
4643 if (ret)
4644 goto err_out;
4645 }
4646 }
4647 }
4648
4649 /*
4650 * If gpu_od is the only member in the list, that means gpu_od is an
4651 * empty directory, so remove it.
4652 */
4653 if (list_is_singular(&adev->pm.od_kobj_list))
4654 goto err_out;
4655
4656 return 0;
4657
4658 err_out:
4659 amdgpu_od_set_fini(adev);
4660
4661 return ret;
4662 }
4663
4664 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4665 {
4666 enum amdgpu_sriov_vf_mode mode;
4667 uint32_t mask = 0;
4668 uint32_t tmp;
4669 int ret;
4670
4671 if (adev->pm.sysfs_initialized)
4672 return 0;
4673
4674 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4675
4676 if (adev->pm.dpm_enabled == 0)
4677 return 0;
4678
4679 mode = amdgpu_virt_get_sriov_vf_mode(adev);
4680
4681 /* under multi-vf mode, none of the hwmon attributes are supported */
4682 if (mode != SRIOV_VF_MODE_MULTI_VF) {
4683 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4684 DRIVER_NAME, adev,
4685 hwmon_groups);
4686 if (IS_ERR(adev->pm.int_hwmon_dev)) {
4687 ret = PTR_ERR(adev->pm.int_hwmon_dev);
4688 dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4689 return ret;
4690 }
4691 }
4692
4693 switch (mode) {
4694 case SRIOV_VF_MODE_ONE_VF:
4695 mask = ATTR_FLAG_ONEVF;
4696 break;
4697 case SRIOV_VF_MODE_MULTI_VF:
4698 mask = 0;
4699 break;
4700 case SRIOV_VF_MODE_BARE_METAL:
4701 default:
4702 mask = ATTR_FLAG_MASK_ALL;
4703 break;
4704 }
4705
4706 ret = amdgpu_device_attr_create_groups(adev,
4707 amdgpu_device_attrs,
4708 ARRAY_SIZE(amdgpu_device_attrs),
4709 mask,
4710 &adev->pm.pm_attr_list);
4711 if (ret)
4712 goto err_out0;
4713
4714 if (amdgpu_dpm_is_overdrive_supported(adev)) {
4715 ret = amdgpu_od_set_init(adev);
4716 if (ret)
4717 goto err_out1;
4718 } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
4719 dev_info(adev->dev, "overdrive feature is not supported\n");
4720 }
4721
4722 if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
4723 -EOPNOTSUPP) {
4724 ret = devm_device_add_group(adev->dev,
4725 &amdgpu_pm_policy_attr_group);
4726 if (ret)
4727 goto err_out0;
4728 }
4729
4730 if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
4731 ret = devm_device_add_group(adev->dev,
4732 &amdgpu_board_attr_group);
4733 if (ret)
4734 goto err_out0;
4735 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
4736 (void *)&tmp) != -EOPNOTSUPP) {
4737 sysfs_add_file_to_group(&adev->dev->kobj,
4738 &dev_attr_cur_node_power_limit.attr,
4739 amdgpu_board_attr_group.name);
4740 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
4741 amdgpu_board_attr_group.name);
4742 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
4743 amdgpu_board_attr_group.name);
4744 sysfs_add_file_to_group(&adev->dev->kobj,
4745 &dev_attr_max_node_power_limit.attr,
4746 amdgpu_board_attr_group.name);
4747 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
4748 amdgpu_board_attr_group.name);
4749 }
4750 }
4751
4752 adev->pm.sysfs_initialized = true;
4753
4754 return 0;
4755
4756 err_out1:
4757 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4758 err_out0:
4759 if (adev->pm.int_hwmon_dev)
4760 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4761
4762 return ret;
4763 }
4764
4765 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4766 {
4767 amdgpu_od_set_fini(adev);
4768
4769 if (adev->pm.int_hwmon_dev)
4770 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4771
4772 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4773 }
4774
4775 /*
4776 * Debugfs info
4777 */
4778 #if defined(CONFIG_DEBUG_FS)
4779
4780 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4781 struct amdgpu_device *adev)
4782 {
4783 uint16_t *p_val;
4784 uint32_t size;
4785 int i;
4786 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4787
4788 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4789 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
4790 GFP_KERNEL);
if (!p_val)
	return;
4791
4792 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4793 (void *)p_val, &size)) {
4794 for (i = 0; i < num_cpu_cores; i++)
4795 seq_printf(m, "\t%u MHz (CPU%d)\n",
4796 *(p_val + i), i);
4797 }
4798
4799 kfree(p_val);
4800 }
4801 }
4802
4803 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4804 {
4805 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4806 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4807 uint32_t value;
4808 uint64_t value64 = 0;
4809 uint32_t query = 0;
4810 int size;
4811
4812 /* GPU Clocks */
4813 size = sizeof(value);
4814 seq_printf(m, "GFX Clocks and Power:\n");
4815
4816 amdgpu_debugfs_prints_cpu_info(m, adev);
4817
4818 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4819 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4820 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4821 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4822 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4823 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4824 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4825 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4826 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4827 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4828 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4829 seq_printf(m, "\t%u mV (VDDNB)\n", value);
4830 size = sizeof(uint32_t);
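/* the power sensors below report watts in fixed point: integer watts in the upper bits, 1/256 W steps in the low byte */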
4831 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4832 if (adev->flags & AMD_IS_APU)
4833 seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4834 else
4835 seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4836 }
4837 size = sizeof(uint32_t);
4838 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4839 if (adev->flags & AMD_IS_APU)
4840 seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4841 else
4842 seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4843 }
4844 size = sizeof(value);
4845 seq_printf(m, "\n");
4846
4847 /* GPU Temp */
4848 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4849 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4850
4851 /* GPU Load */
4852 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4853 seq_printf(m, "GPU Load: %u %%\n", value);
4854 /* MEM Load */
4855 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4856 seq_printf(m, "MEM Load: %u %%\n", value);
4857 /* VCN Load */
4858 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
4859 seq_printf(m, "VCN Load: %u %%\n", value);
4860
4861 seq_printf(m, "\n");
4862
4863 /* SMC feature mask */
4864 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4865 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4866
4867 /* ASICs greater than CHIP_VEGA20 support these sensors */
4868 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
4869 /* VCN clocks */
4870 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4871 if (!value) {
4872 seq_printf(m, "VCN: Powered down\n");
4873 } else {
4874 seq_printf(m, "VCN: Powered up\n");
4875 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4876 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4877 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4878 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4879 }
4880 }
4881 seq_printf(m, "\n");
4882 } else {
4883 /* UVD clocks */
4884 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4885 if (!value) {
4886 seq_printf(m, "UVD: Powered down\n");
4887 } else {
4888 seq_printf(m, "UVD: Powered up\n");
4889 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4890 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4891 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4892 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4893 }
4894 }
4895 seq_printf(m, "\n");
4896
4897 /* VCE clocks */
4898 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4899 if (!value) {
4900 seq_printf(m, "VCE: Powered down\n");
4901 } else {
4902 seq_printf(m, "VCE: Powered up\n");
4903 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4904 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4905 }
4906 }
4907 }
4908
4909 return 0;
4910 }
4911
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

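/**
 * amdgpu_parse_cg_state - Print the state of each known clockgating feature.
 * @m: seq_file to print to.
 * @flags: clockgating flag mask reported by the IP blocks.
 *
 * Walks the clocks[] table above and prints "On" or "Off" for every
 * feature, depending on whether its bit is set in @flags.
 */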
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

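/**
 * amdgpu_debugfs_pm_info_show - debugfs show handler for amdgpu_pm_info.
 * @m: seq_file to print to.
 * @unused: unused.
 *
 * Takes PM access, prints either the backend's own performance-level
 * dump or the generic sensor dump, followed by the decoded clockgating
 * flags, and then drops PM access again.
 */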
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	u64 flags = 0;
	int r;

	r = amdgpu_pm_get_access(adev);
	if (r < 0)
		return r;

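	/*
	 * A non-zero return means the backend does not implement its own
	 * performance-level dump; fall back to the generic sensor dump.
	 */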
	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
		if (r)
			goto out;
	}

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_puts(m, "\n");

out:
	amdgpu_pm_put_access(adev);

	return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);

/*
 * amdgpu_pm_prv_buffer_read - Read the memory region allocated to FW
 *
 * Reads the debug memory region reserved for the PM firmware (PMFW).
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	size_t smu_prv_buf_size;
	void *smu_prv_buf;
	int ret = 0;

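	/* Reject reads while the device is in reset or runtime suspended. */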
	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
	if (ret)
		return ret;

	if (!smu_prv_buf || !smu_prv_buf_size)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
				       smu_prv_buf_size);
}

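/* debugfs file operations for the read-only PMFW debug buffer */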
static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};

#endif /* CONFIG_DEBUG_FS */

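/**
 * amdgpu_debugfs_pm_init - Create the power-management debugfs files.
 * @adev: Target device.
 *
 * Registers the read-only amdgpu_pm_info file, plus amdgpu_pm_prv_buffer
 * when the SMU reserved a debug buffer, under the device's primary DRM
 * debugfs directory (typically /sys/kernel/debug/dri/<minor>/).
 */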
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

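	/* Expose the PMFW debug buffer only if firmware reserved one. */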
	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif /* CONFIG_DEBUG_FS */
}
