/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>

#define MAX_NUM_OF_FEATURES_PER_SUBSET		8
#define MAX_NUM_OF_SUBSETS			8

#define DEVICE_ATTR_IS(_name)	(attr_id == device_attr_id__##_name)

struct od_attribute {
	struct kobj_attribute	attribute;
	struct list_head	entry;
};

struct od_kobj {
	struct kobject		kobj;
	struct list_head	entry;
	struct list_head	attribute;
	void			*priv;
};

struct od_feature_ops {
	umode_t (*is_visible)(struct amdgpu_device *adev);
	ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf);
	ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t count);
};

struct od_feature_item {
	const char		*name;
	struct od_feature_ops	ops;
};

struct od_feature_container {
	char				*name;
	struct od_feature_ops		ops;
	struct od_feature_item		sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};

struct od_feature_set {
	struct od_feature_container	containers[MAX_NUM_OF_SUBSETS];
};

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM",
	"WINDOW_3D",
	"CAPPED",
	"UNCAPPED",
};

/**
 * amdgpu_pm_dev_state_check - Check if device can be accessed.
 * @adev: Target device.
 * @runpm: Check runpm status for suspend state checks.
 *
 * Checks the state of the @adev for access. Return 0 if the device is
 * accessible or a negative error code otherwise.
 */
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
	bool runpm_check = runpm ? adev->in_runpm : false;
	bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);

	if (amdgpu_in_reset(adev) || !full_init)
		return -EBUSY;

	if (adev->in_suspend && !runpm_check)
		return -EBUSY;

	return 0;
}

/**
 * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to resume if
 * needed. Return 0 if the device is accessible or a negative error code
 * otherwise.
 */
static int amdgpu_pm_get_access(struct amdgpu_device *adev)
{
	int ret;

	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	return pm_runtime_resume_and_get(adev->dev);
}

/**
 * amdgpu_pm_get_access_if_active - Check if device is active for access.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to determine
 * if the device is active. Allow access only if the device is active. Return 0
 * if the device is accessible or a negative error code otherwise.
 */
static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
{
	int ret;

	/* Ignore runpm status. If device is in suspended state, deny access */
	ret = amdgpu_pm_dev_state_check(adev, false);
	if (ret)
		return ret;

	/*
	 * Allow only if the device is active. If runpm is disabled as well,
	 * as in kernels without CONFIG_PM, allow access.
	 */
	ret = pm_runtime_get_if_active(adev->dev);
	if (!ret)
		return -EPERM;

	return 0;
}

/**
 * amdgpu_pm_put_access - Put to auto suspend mode after a device access.
 * @adev: Target device.
 *
 * Should be paired with amdgpu_pm_get_access* calls.
 */
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
	pm_runtime_put_autosuspend(adev->dev);
}

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
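 * A minimal usage sketch (the card index in the path is an assumption and
 * may vary per system)::
 *
 *	echo performance > /sys/class/drm/card0/device/power_dpm_state
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *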
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_dpm_get_current_power_state(adev, &pm);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	int ret;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_power_state(adev, state);

	amdgpu_pm_put_access(adev);

	return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating, or clock fluctuations, to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
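 * A minimal usage sketch (card index assumed; it may vary per system)::
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 *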
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	level = amdgpu_dpm_get_performance_level(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%s\n",
			  (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			  (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			  (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			  (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			  (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			  (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
			  "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							     struct device_attribute *attr,
							     const char *buf,
							     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		amdgpu_pm_put_access(adev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
		return -EINVAL;
	}
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	amdgpu_pm_put_access(adev);

	return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	uint32_t i;
	int buf_len, ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	if (amdgpu_dpm_get_pp_num_states(adev, &data))
		memset(&data, 0, sizeof(data));

	amdgpu_pm_put_access(adev);

	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_dpm_get_current_power_state(adev, &pm);

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->pm.pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	unsigned long idx;
	int ret;

	adev->pm.pp_force_state_enabled = false;

	if (strlen(buf) == 1)
		return count;

	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
		return -EINVAL;

	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	if (ret)
		goto err_out;

	state = data.states[idx];

	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		if (ret)
			goto err_out;

		adev->pm.pp_force_state_enabled = true;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return ret;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current power play table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
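 * A usage sketch (hypothetical paths; uploading a malformed table can
 * destabilize the device)::
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bak
 *	cat /tmp/pp_table.new > /sys/class/drm/card0/device/pp_table
 *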
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *table = NULL;
	int size, ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_pp_table(adev, &table);

	amdgpu_pm_put_access(adev);

	if (size <= 0)
		return size;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret = 0;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_pp_table(adev, buf, count);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The pp_od_clk_voltage is used for
 * this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve. This is
 *   available for Vega20 and NV1X.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
 *   voltage calculation can be illustrated by "voltage = voltage
 *   calculated from v/f curve + overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, voltage curve points
 *   or voltage offset labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
 *   "m 1 800" will update the maximum mclk to be 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 *   For the sclk voltage curve supported by Vega20 and NV1X, enter the new
 *   values by writing a string that contains "vc point clock voltage"
 *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
 *   600" will update the first point (index 0) with the clock set to 300 MHz
 *   and the voltage to 600 mV. "vc 2 1000 1000" will update the third point
 *   (index 2) with the clock set to 1000 MHz and the voltage to 1000 mV.
 *
 *   For the voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs, enter the new value by writing a
 *   string that contains "vo offset". E.g., "vo -10" will update the extra
 *   voltage offset applied to the whole v/f curve line to -10 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
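 * A usage sketch for a Vega20-class dGPU (hypothetical path and clock value;
 * valid ranges must be read back from OD_RANGE first)::
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 1 2000" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *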
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (count > 127 || count == 0)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count);
	buf_cpy[count] = 0;

	tmp_str = buf_cpy;

	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		tmp_str++;
	while (isspace(*++tmp_str));

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
					      type,
					      parameter,
					      parameter_size))
		goto err_out;

	if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
					  parameter, parameter_size))
		goto err_out;

	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (amdgpu_dpm_dispatch_task(adev,
					     AMD_PP_TASK_READJUST_POWER_STATE,
					     NULL))
			goto err_out;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret;
	enum pp_clock_type od_clocks[6] = {
		OD_SCLK,
		OD_MCLK,
		OD_VDDC_CURVE,
		OD_RANGE,
		OD_VDDGFX_OFFSET,
		OD_CCLK,
	};
	uint clk_index;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
		if (ret)
			break;
	}
	if (ret == -ENOENT) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this. It is
 * only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit from the original ppfeature masks and input the
 * new ppfeature masks.
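 *
 * A usage sketch (the mask below is a hypothetical value; a real mask must
 * be derived from the one read back from this file)::
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x0000000019f0e9ff > /sys/class/drm/card0/device/pp_features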
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels. If deep sleep is
 * applied to a clock, the level will be denoted by a special level 'S:'
 * E.g., ::
 *
 *	S: 19Mhz *
 *	0: 615Mhz
 *	1: 800Mhz
 *	2: 888Mhz
 *	3: 1000Mhz
 *
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of level indices to the file ("echo xx xx xx > pp_dpm_sclk/mclk/pcie").
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret = 0;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
	if (ret == -ENOENT)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)
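
/*
 * Illustrative example for amdgpu_read_mask() below: the input string
 * "4 5 6" parses to the level mask 0x70 (bits 4, 5 and 6 set).
 */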
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	unsigned long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
				       enum pp_clock_type type,
				       const char *buf,
				       size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	uint32_t mask = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_force_clock_level(adev, type, mask);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_sclk_od(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

	amdgpu_pm_put_access(adev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	value = amdgpu_dpm_get_mclk_od(adev);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	amdgpu_pm_put_access(adev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family. Additionally,
 * you can apply the custom heuristics to different clock domains. Each
 * clock domain is considered a distinct operation so if you modify the
 * gfxclk heuristics and then the memclk heuristics, all of the
 * custom heuristics will be retained until you switch to another profile.
 *
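 * A usage sketch (the profile number below is an assumption; valid numbers
 * must be read back from this file first)::
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo 2 > /sys/class/drm/card0/device/pp_power_profile_mode
 *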
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
				continue;
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			if (!tmp_str)
				break;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	amdgpu_pm_put_access(adev);

	if (!ret)
		return count;

	return -EINVAL;
}
static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
					enum amd_pp_sensors sensor,
					void *query)
{
	int r, size = sizeof(uint32_t);

	r = amdgpu_pm_get_access_if_active(adev);
	if (r)
		return r;

	/* get the sensor value */
	r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);

	amdgpu_pm_put_access(adev);

	return r;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: vcn_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VCN
 * is as a percentage. The file vcn_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	unsigned int value;
	int r;

	r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
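 *
 * As a rough estimation sketch (assuming each counted message carried a
 * full mps-sized payload), a reading of "4096 8192 256" would suggest an
 * upper bound of (4096 + 8192) * 256 bytes, i.e. about 3 MB of PCIe
 * traffic in the last second.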
 */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	amdgpu_pm_put_access(adev);

	return sysfs_emit(buf, "%llu %llu %i\n",
			  count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for
 * the GPU. The file unique_id is used for this.
 * This will provide a unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);

	return 0;
}

/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
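 *
 * A usage sketch (hypothetical path; logs throttling events at most once
 * per minute)::
 *
 *	echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging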
 */
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			  adev_to_drm(adev)->unique,
			  atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			  adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		ratelimit_state_reset_interval(&adev->throttling_logging_rs,
					       (throttling_logging_interval - 1) * HZ);
		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}

/**
 * DOC: apu_thermal_cap
 *
 * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
 * limit temperature in millidegrees Celsius.
 *
 * Reading back the file shows you the current core limit value.
 *
 * Writing an integer to the file sets a new thermal limit. The value
 * should be between 0 and 100. If the value is less than 0 or greater
 * than 100, then the write request will be rejected.
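 *
 * A usage sketch (hypothetical path)::
 *
 *	echo 95 > /sys/class/drm/card0/device/apu_thermal_cap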
1647 */
amdgpu_get_apu_thermal_cap(struct device * dev,struct device_attribute * attr,char * buf)1648 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1649 struct device_attribute *attr,
1650 char *buf)
1651 {
1652 int ret, size;
1653 u32 limit;
1654 struct drm_device *ddev = dev_get_drvdata(dev);
1655 struct amdgpu_device *adev = drm_to_adev(ddev);
1656
1657 ret = amdgpu_pm_get_access_if_active(adev);
1658 if (ret)
1659 return ret;
1660
1661 ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1662 if (!ret)
1663 size = sysfs_emit(buf, "%u\n", limit);
1664 else
1665 size = sysfs_emit(buf, "failed to get thermal limit\n");
1666
1667 amdgpu_pm_put_access(adev);
1668
1669 return size;
1670 }
1671
amdgpu_set_apu_thermal_cap(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1672 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1673 struct device_attribute *attr,
1674 const char *buf,
1675 size_t count)
1676 {
1677 int ret;
1678 u32 value;
1679 struct drm_device *ddev = dev_get_drvdata(dev);
1680 struct amdgpu_device *adev = drm_to_adev(ddev);
1681
1682 ret = kstrtou32(buf, 10, &value);
1683 if (ret)
1684 return ret;
1685
1686 if (value > 100) {
1687 dev_err(dev, "Invalid argument !\n");
1688 return -EINVAL;
1689 }
1690
1691 ret = amdgpu_pm_get_access(adev);
1692 if (ret < 0)
1693 return ret;
1694
1695 ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1696 if (ret) {
1697 amdgpu_pm_put_access(adev);
1698 dev_err(dev, "failed to update thermal limit\n");
1699 return ret;
1700 }
1701
1702 amdgpu_pm_put_access(adev);
1703
1704 return count;
1705 }
1706
amdgpu_pm_metrics_attr_update(struct amdgpu_device * adev,struct amdgpu_device_attr * attr,uint32_t mask,enum amdgpu_device_attr_states * states)1707 static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
1708 struct amdgpu_device_attr *attr,
1709 uint32_t mask,
1710 enum amdgpu_device_attr_states *states)
1711 {
1712 if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
1713 *states = ATTR_STATE_UNSUPPORTED;
1714
1715 return 0;
1716 }
1717
amdgpu_get_pm_metrics(struct device * dev,struct device_attribute * attr,char * buf)1718 static ssize_t amdgpu_get_pm_metrics(struct device *dev,
1719 struct device_attribute *attr, char *buf)
1720 {
1721 struct drm_device *ddev = dev_get_drvdata(dev);
1722 struct amdgpu_device *adev = drm_to_adev(ddev);
1723 ssize_t size = 0;
1724 int ret;
1725
1726 ret = amdgpu_pm_get_access_if_active(adev);
1727 if (ret)
1728 return ret;
1729
1730 size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);
1731
1732 amdgpu_pm_put_access(adev);
1733
1734 return size;
1735 }
1736
1737 /**
1738 * DOC: gpu_metrics
1739 *
1740 * The amdgpu driver provides a sysfs API for retrieving current gpu
1741 * metrics data. The file gpu_metrics is used for this. Reading the
1742 * file will dump all the current gpu metrics data.
1743 *
1744 * The data includes temperature, frequency, engine utilization, power
1745 * consumption, throttler status, fan speed and CPU core statistics
1746 * (available on APUs only). That is, it gives a snapshot of all sensors
1747 * at the same time.
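 *
 * The returned table is binary and its layout varies by ASIC; it can be
 * dumped for offline inspection, e.g.:
 *
 * .. code-block:: console
 *
 *	hexdump -C /sys/bus/pci/devices/.../gpu_metrics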
1748 */
1749 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
1750 struct device_attribute *attr,
1751 char *buf)
1752 {
1753 struct drm_device *ddev = dev_get_drvdata(dev);
1754 struct amdgpu_device *adev = drm_to_adev(ddev);
1755 void *gpu_metrics;
1756 ssize_t size = 0;
1757 int ret;
1758
1759 ret = amdgpu_pm_get_access_if_active(adev);
1760 if (ret)
1761 return ret;
1762
1763 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
1764 if (size <= 0)
1765 goto out;
1766
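	/* sysfs reads are limited to one page; truncate oversized metrics tables */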
1767 if (size >= PAGE_SIZE)
1768 size = PAGE_SIZE - 1;
1769
1770 memcpy(buf, gpu_metrics, size);
1771
1772 out:
1773 amdgpu_pm_put_access(adev);
1774
1775 return size;
1776 }
1777
1778 static int amdgpu_show_powershift_percent(struct device *dev,
1779 char *buf, enum amd_pp_sensors sensor)
1780 {
1781 struct drm_device *ddev = dev_get_drvdata(dev);
1782 struct amdgpu_device *adev = drm_to_adev(ddev);
1783 uint32_t ss_power;
1784 int r = 0, i;
1785
1786 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1787 if (r == -EOPNOTSUPP) {
1788 /* sensor not available on dGPU, try to read from APU */
1789 adev = NULL;
1790 mutex_lock(&mgpu_info.mutex);
1791 for (i = 0; i < mgpu_info.num_gpu; i++) {
1792 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
1793 adev = mgpu_info.gpu_ins[i].adev;
1794 break;
1795 }
1796 }
1797 mutex_unlock(&mgpu_info.mutex);
1798 if (adev)
1799 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
1800 }
1801
1802 if (r)
1803 return r;
1804
1805 return sysfs_emit(buf, "%u%%\n", ss_power);
1806 }
1807
1808 /**
1809 * DOC: smartshift_apu_power
1810 *
1811 * The amdgpu driver provides a sysfs API for reporting the APU power
1812 * shift in percentage if the platform supports smartshift. A value of 0 means
1813 * that there is no power shift, and values in the range [1-100] mean that
1814 * power is shifted to the APU; the percentage of boost is with respect to
1815 * the APU power limit on the platform.
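 *
 * For example (device path abbreviated):
 *
 * .. code-block:: console
 *
 *	cat /sys/bus/pci/devices/.../smartshift_apu_power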
1816 */
1817
1818 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
1819 char *buf)
1820 {
1821 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
1822 }
1823
1824 /**
1825 * DOC: smartshift_dgpu_power
1826 *
1827 * The amdgpu driver provides a sysfs API for reporting the dGPU power
1828 * shift in percentage if the platform supports smartshift. A value of 0 means
1829 * that there is no power shift, and values in the range [1-100] mean that
1830 * power is shifted to the dGPU; the percentage of boost is with respect to
1831 * the dGPU power limit on the platform.
1832 */
1833
1834 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
1835 char *buf)
1836 {
1837 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
1838 }
1839
1840 /**
1841 * DOC: smartshift_bias
1842 *
1843 * The amdgpu driver provides a sysfs API for reporting the
1844 * smartshift(SS2.0) bias level. The value ranges from -100 to 100
1845 * and the default is 0. -100 sets maximum preference to the APU
1846 * and 100 sets maximum preference to the dGPU.
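 *
 * For example, to bias power fully towards the APU (device path
 * abbreviated):
 *
 * .. code-block:: console
 *
 *	echo -100 > /sys/bus/pci/devices/.../smartshift_bias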
1847 */
1848
1849 static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
1850 struct device_attribute *attr,
1851 char *buf)
1852 {
1853 int r = 0;
1854
1855 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
1856
1857 return r;
1858 }
1859
1860 static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
1861 struct device_attribute *attr,
1862 const char *buf, size_t count)
1863 {
1864 struct drm_device *ddev = dev_get_drvdata(dev);
1865 struct amdgpu_device *adev = drm_to_adev(ddev);
1866 int r = 0;
1867 int bias = 0;
1868
1869 	r = kstrtoint(buf, 10, &bias);
1870 	if (r)
1871 		return r;
1872
1873 r = amdgpu_pm_get_access(adev);
1874 if (r < 0)
1875 return r;
1876
1877 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
1878 bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
1879 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
1880 bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
1881
1882 amdgpu_smartshift_bias = bias;
1883 r = count;
1884
1885 /* TODO: update bias level with SMU message */
1886
1888 	amdgpu_pm_put_access(adev);
1889
1890 return r;
1891 }
1892
1893 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1894 uint32_t mask, enum amdgpu_device_attr_states *states)
1895 {
1896 if (!amdgpu_device_supports_smart_shift(adev))
1897 *states = ATTR_STATE_UNSUPPORTED;
1898
1899 return 0;
1900 }
1901
1902 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1903 uint32_t mask, enum amdgpu_device_attr_states *states)
1904 {
1905 uint32_t ss_power;
1906
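	/* the bias node is only useful when both power share sensors are available */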
1907 if (!amdgpu_device_supports_smart_shift(adev))
1908 *states = ATTR_STATE_UNSUPPORTED;
1909 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
1910 (void *)&ss_power))
1911 *states = ATTR_STATE_UNSUPPORTED;
1912 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
1913 (void *)&ss_power))
1914 *states = ATTR_STATE_UNSUPPORTED;
1915
1916 return 0;
1917 }
1918
1919 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1920 uint32_t mask, enum amdgpu_device_attr_states *states)
1921 {
1922 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1923
1924 *states = ATTR_STATE_SUPPORTED;
1925
1926 if (!amdgpu_dpm_is_overdrive_supported(adev)) {
1927 *states = ATTR_STATE_UNSUPPORTED;
1928 return 0;
1929 }
1930
1931 /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
1932 if (gc_ver == IP_VERSION(9, 4, 3) ||
1933 gc_ver == IP_VERSION(9, 4, 4) ||
1934 gc_ver == IP_VERSION(9, 5, 0)) {
1935 if (amdgpu_sriov_multi_vf_mode(adev))
1936 *states = ATTR_STATE_UNSUPPORTED;
1937 return 0;
1938 }
1939
1940 if (!(attr->flags & mask))
1941 *states = ATTR_STATE_UNSUPPORTED;
1942
1943 return 0;
1944 }
1945
1946 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1947 uint32_t mask, enum amdgpu_device_attr_states *states)
1948 {
1949 struct device_attribute *dev_attr = &attr->dev_attr;
1950 uint32_t gc_ver;
1951
1952 *states = ATTR_STATE_SUPPORTED;
1953
1954 if (!(attr->flags & mask)) {
1955 *states = ATTR_STATE_UNSUPPORTED;
1956 return 0;
1957 }
1958
1959 gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1960 /* dcefclk node is not available on gfx 11.0.3 sriov */
1961 if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
1962 gc_ver < IP_VERSION(9, 0, 0) ||
1963 !amdgpu_device_has_display_hardware(adev))
1964 *states = ATTR_STATE_UNSUPPORTED;
1965
1966 	/* SMU MP1 does not support dcefclk level setting;
1967 	 * writes should not be allowed from a VF unless it is in one-VF mode.
1968 	 */
1969 if (gc_ver >= IP_VERSION(10, 0, 0) ||
1970 (amdgpu_sriov_multi_vf_mode(adev))) {
1971 dev_attr->attr.mode &= ~S_IWUGO;
1972 dev_attr->store = NULL;
1973 }
1974
1975 return 0;
1976 }
1977
1978 static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
1979 uint32_t mask, enum amdgpu_device_attr_states *states)
1980 {
1981 struct device_attribute *dev_attr = &attr->dev_attr;
1982 enum amdgpu_device_attr_id attr_id = attr->attr_id;
1983 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
1984 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
1985
1986 *states = ATTR_STATE_SUPPORTED;
1987
1988 if (!(attr->flags & mask)) {
1989 *states = ATTR_STATE_UNSUPPORTED;
1990 return 0;
1991 }
1992
1993 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
1994 if (gc_ver < IP_VERSION(9, 0, 0))
1995 *states = ATTR_STATE_UNSUPPORTED;
1996 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
1997 if (mp1_ver < IP_VERSION(10, 0, 0))
1998 *states = ATTR_STATE_UNSUPPORTED;
1999 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2000 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2001 gc_ver == IP_VERSION(10, 3, 3) ||
2002 gc_ver == IP_VERSION(10, 3, 6) ||
2003 gc_ver == IP_VERSION(10, 3, 7) ||
2004 gc_ver == IP_VERSION(10, 3, 0) ||
2005 gc_ver == IP_VERSION(10, 1, 2) ||
2006 gc_ver == IP_VERSION(11, 0, 0) ||
2007 gc_ver == IP_VERSION(11, 0, 1) ||
2008 gc_ver == IP_VERSION(11, 0, 4) ||
2009 gc_ver == IP_VERSION(11, 5, 0) ||
2010 gc_ver == IP_VERSION(11, 0, 2) ||
2011 gc_ver == IP_VERSION(11, 0, 3) ||
2012 gc_ver == IP_VERSION(9, 4, 3) ||
2013 gc_ver == IP_VERSION(9, 4, 4) ||
2014 gc_ver == IP_VERSION(9, 5, 0)))
2015 *states = ATTR_STATE_UNSUPPORTED;
2016 } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
2017 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2018 gc_ver == IP_VERSION(10, 3, 0) ||
2019 gc_ver == IP_VERSION(11, 0, 2) ||
2020 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2021 *states = ATTR_STATE_UNSUPPORTED;
2022 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2023 if (!(gc_ver == IP_VERSION(10, 3, 1) ||
2024 gc_ver == IP_VERSION(10, 3, 3) ||
2025 gc_ver == IP_VERSION(10, 3, 6) ||
2026 gc_ver == IP_VERSION(10, 3, 7) ||
2027 gc_ver == IP_VERSION(10, 3, 0) ||
2028 gc_ver == IP_VERSION(10, 1, 2) ||
2029 gc_ver == IP_VERSION(11, 0, 0) ||
2030 gc_ver == IP_VERSION(11, 0, 1) ||
2031 gc_ver == IP_VERSION(11, 0, 4) ||
2032 gc_ver == IP_VERSION(11, 5, 0) ||
2033 gc_ver == IP_VERSION(11, 0, 2) ||
2034 gc_ver == IP_VERSION(11, 0, 3) ||
2035 gc_ver == IP_VERSION(9, 4, 3) ||
2036 gc_ver == IP_VERSION(9, 4, 4) ||
2037 gc_ver == IP_VERSION(9, 5, 0)))
2038 *states = ATTR_STATE_UNSUPPORTED;
2039 } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
2040 if (!((gc_ver == IP_VERSION(10, 3, 1) ||
2041 gc_ver == IP_VERSION(10, 3, 0) ||
2042 gc_ver == IP_VERSION(11, 0, 2) ||
2043 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
2044 *states = ATTR_STATE_UNSUPPORTED;
2045 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
2046 if (gc_ver == IP_VERSION(9, 4, 2) ||
2047 gc_ver == IP_VERSION(9, 4, 3) ||
2048 gc_ver == IP_VERSION(9, 4, 4) ||
2049 gc_ver == IP_VERSION(9, 5, 0))
2050 *states = ATTR_STATE_UNSUPPORTED;
2051 }
2052
2053 switch (gc_ver) {
2054 case IP_VERSION(9, 4, 1):
2055 case IP_VERSION(9, 4, 2):
2056 		/* the MI series cards do not support standalone mclk/socclk/fclk level setting */
2057 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2058 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2059 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2060 dev_attr->attr.mode &= ~S_IWUGO;
2061 dev_attr->store = NULL;
2062 }
2063 break;
2064 default:
2065 break;
2066 }
2067
2068 /* setting should not be allowed from VF if not in one VF mode */
2069 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
2070 dev_attr->attr.mode &= ~S_IWUGO;
2071 dev_attr->store = NULL;
2072 }
2073
2074 return 0;
2075 }
2076
2077 /**
2078 * DOC: board
2079 *
2080 * Certain SOCs can support reporting of various board attributes. This is
2081 * useful for user applications to monitor various board related attributes.
2082 *
2083 * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
2084 * seven types of attributes are reported. Baseboard temperature and
2085 * gpu board temperature are reported as binary files. Npm status, current node power limit,
2086 * max node power limit, node power and global ppt residency are reported as ASCII text files.
2087 *
2088 * .. code-block:: console
2089 *
2090 * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
2091 *
2092 * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
2093 *
2094 * hexdump /sys/bus/pci/devices/.../board/npm_status
2095 *
2096 * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
2097 *
2098 * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
2099 *
2100 * hexdump /sys/bus/pci/devices/.../board/node_power
2101 *
2102 * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
2103 */
2104
2105 /**
2106 * DOC: baseboard_temp
2107 *
2108 * The amdgpu driver provides a sysfs API for retrieving current baseboard
2109 * temperature metrics data. The file baseboard_temp is used for this.
2110 * Reading the file will dump all the current baseboard temperature metrics data.
2111 */
2112 static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
2113 struct device_attribute *attr, char *buf)
2114 {
2115 struct drm_device *ddev = dev_get_drvdata(dev);
2116 struct amdgpu_device *adev = drm_to_adev(ddev);
2117 ssize_t size;
2118 int ret;
2119
2120 ret = amdgpu_pm_get_access_if_active(adev);
2121 if (ret)
2122 return ret;
2123
2124 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
2125 if (size <= 0)
2126 goto out;
2127 if (size >= PAGE_SIZE) {
2128 ret = -ENOSPC;
2129 goto out;
2130 }
2131
2132 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
2133
2134 out:
2135 amdgpu_pm_put_access(adev);
2136
2137 if (ret)
2138 return ret;
2139
2140 return size;
2141 }
2142
2143 /**
2144 * DOC: gpuboard_temp
2145 *
2146 * The amdgpu driver provides a sysfs API for retrieving current gpuboard
2147 * temperature metrics data. The file gpuboard_temp is used for this.
2148 * Reading the file will dump all the current gpuboard temperature metrics data.
2149 */
2150 static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
2151 struct device_attribute *attr, char *buf)
2152 {
2153 struct drm_device *ddev = dev_get_drvdata(dev);
2154 struct amdgpu_device *adev = drm_to_adev(ddev);
2155 ssize_t size;
2156 int ret;
2157
2158 ret = amdgpu_pm_get_access_if_active(adev);
2159 if (ret)
2160 return ret;
2161
2162 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
2163 if (size <= 0)
2164 goto out;
2165 if (size >= PAGE_SIZE) {
2166 ret = -ENOSPC;
2167 goto out;
2168 }
2169
2170 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
2171
2172 out:
2173 amdgpu_pm_put_access(adev);
2174
2175 if (ret)
2176 return ret;
2177
2178 return size;
2179 }
2180
2181 /**
2182 * DOC: cur_node_power_limit
2183 *
2184 * The amdgpu driver provides a sysfs API for retrieving current node power limit.
2185 * The file cur_node_power_limit is used for this.
2186 */
2187 static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
2188 struct device_attribute *attr, char *buf)
2189 {
2190 struct drm_device *ddev = dev_get_drvdata(dev);
2191 struct amdgpu_device *adev = drm_to_adev(ddev);
2192 u32 nplimit;
2193 int r;
2194
2195 /* get the current node power limit */
2196 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
2197 (void *)&nplimit);
2198 if (r)
2199 return r;
2200
2201 return sysfs_emit(buf, "%u\n", nplimit);
2202 }
2203
2204 /**
2205 * DOC: node_power
2206 *
2207 * The amdgpu driver provides a sysfs API for retrieving current node power.
2208 * The file node_power is used for this.
2209 */
2210 static ssize_t amdgpu_show_node_power(struct device *dev,
2211 struct device_attribute *attr, char *buf)
2212 {
2213 struct drm_device *ddev = dev_get_drvdata(dev);
2214 struct amdgpu_device *adev = drm_to_adev(ddev);
2215 u32 npower;
2216 int r;
2217
2218 /* get the node power */
2219 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2220 (void *)&npower);
2221 if (r)
2222 return r;
2223
2224 return sysfs_emit(buf, "%u\n", npower);
2225 }
2226
2227 /**
2228 * DOC: npm_status
2229 *
2230 * The amdgpu driver provides a sysfs API for retrieving the current node power
2231 * management status. The file npm_status is used for this. It shows the status as
2232 * enabled or disabled: if the node power is zero, the status is disabled; otherwise it is enabled.
2233 */
2234 static ssize_t amdgpu_show_npm_status(struct device *dev,
2235 struct device_attribute *attr, char *buf)
2236 {
2237 struct drm_device *ddev = dev_get_drvdata(dev);
2238 struct amdgpu_device *adev = drm_to_adev(ddev);
2239 u32 npower;
2240 int r;
2241
2242 /* get the node power */
2243 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
2244 (void *)&npower);
2245 if (r)
2246 return r;
2247
2248 return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
2249 }
2250
2251 /**
2252 * DOC: global_ppt_resid
2253 *
2254 * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
2255 * The file global_ppt_resid is used for this.
2256 */
2257 static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
2258 struct device_attribute *attr, char *buf)
2259 {
2260 struct drm_device *ddev = dev_get_drvdata(dev);
2261 struct amdgpu_device *adev = drm_to_adev(ddev);
2262 u32 gpptresid;
2263 int r;
2264
2265 /* get the global ppt residency */
2266 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
2267 (void *)&gpptresid);
2268 if (r)
2269 return r;
2270
2271 return sysfs_emit(buf, "%u\n", gpptresid);
2272 }
2273
2274 /**
2275 * DOC: max_node_power_limit
2276 *
2277 * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
2278 * The file max_node_power_limit is used for this.
2279 */
2280 static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
2281 struct device_attribute *attr, char *buf)
2282 {
2283 struct drm_device *ddev = dev_get_drvdata(dev);
2284 struct amdgpu_device *adev = drm_to_adev(ddev);
2285 u32 max_nplimit;
2286 int r;
2287
2288 /* get the max node power limit */
2289 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
2290 (void *)&max_nplimit);
2291 if (r)
2292 return r;
2293
2294 return sysfs_emit(buf, "%u\n", max_nplimit);
2295 }
2296
2297 static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
2298 static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
2299 static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
2300 static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
2301 static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
2302 static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
2303 static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
2304
2305 static struct attribute *board_attrs[] = {
2306 &dev_attr_baseboard_temp.attr,
2307 &dev_attr_gpuboard_temp.attr,
2308 NULL
2309 };
2310
2311 static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
2312 {
2313 struct device *dev = kobj_to_dev(kobj);
2314 struct drm_device *ddev = dev_get_drvdata(dev);
2315 struct amdgpu_device *adev = drm_to_adev(ddev);
2316
2317 if (attr == &dev_attr_baseboard_temp.attr) {
2318 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
2319 return 0;
2320 }
2321
2322 if (attr == &dev_attr_gpuboard_temp.attr) {
2323 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
2324 return 0;
2325 }
2326
2327 return attr->mode;
2328 }
2329
2330 const struct attribute_group amdgpu_board_attr_group = {
2331 .name = "board",
2332 .attrs = board_attrs,
2333 .is_visible = amdgpu_board_attr_visible,
2334 };
2335
2336 /* pm policy attributes */
2337 struct amdgpu_pm_policy_attr {
2338 struct device_attribute dev_attr;
2339 enum pp_pm_policy id;
2340 };
2341
2342 /**
2343 * DOC: pm_policy
2344 *
2345 * Certain SOCs can support different power policies to optimize application
2346 * performance. However, this policy is provided only at the SOC level and not at a
2347 * per-process level. This is especially useful when the entire SOC is utilized for
2348 * a dedicated workload.
2349 *
2350 * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
2351 * only two types of policies are supported through this interface.
2352 *
2353 * Pstate Policy Selection - This is to select different Pstate profiles which
2354 * decides clock/throttling preferences.
2355 *
2356 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
2357 * this helps to select policy to be applied for per link power down.
2358 *
2359 * The list of available policies and policy levels varies between SOCs. They can
2360 * be viewed under the pm_policy node directory. If the SOC doesn't support any
2361 * policy, this node won't be available. The different policies supported will be
2362 * available as separate nodes under pm_policy.
2363 *
2364 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
2365 *
2366 * Reading the policy file shows the different levels supported. The level which
2367 * is applied presently is denoted by * (asterisk). E.g.,
2368 *
2369 * .. code-block:: console
2370 *
2371 * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
2372 * 0 : soc_pstate_default
2373 * 1 : soc_pstate_0
2374 * 2 : soc_pstate_1*
2375 * 3 : soc_pstate_2
2376 *
2377 * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2378 * 0 : plpd_disallow
2379 * 1 : plpd_default
2380 * 2 : plpd_optimized*
2381 *
2382 * To apply a specific policy
2383 *
2384 * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
2385 *
2386 * For the levels listed in the example above, to select "plpd_optimized" for
2387 * XGMI and "soc_pstate_2" for soc pstate policy -
2388 *
2389 * .. code-block:: console
2390 *
2391 * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
2392 * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
2393 *
2394 */
2395 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
2396 struct device_attribute *attr,
2397 char *buf)
2398 {
2399 struct drm_device *ddev = dev_get_drvdata(dev);
2400 struct amdgpu_device *adev = drm_to_adev(ddev);
2401 struct amdgpu_pm_policy_attr *policy_attr;
2402
2403 policy_attr =
2404 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2405
2406 return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
2407 }
2408
2409 static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
2410 struct device_attribute *attr,
2411 const char *buf, size_t count)
2412 {
2413 struct drm_device *ddev = dev_get_drvdata(dev);
2414 struct amdgpu_device *adev = drm_to_adev(ddev);
2415 struct amdgpu_pm_policy_attr *policy_attr;
2416 int ret, num_params = 0;
2417 char delimiter[] = " \n\t";
2418 char tmp_buf[128];
2419 char *tmp, *param;
2420 long val;
2421
2422 count = min(count, sizeof(tmp_buf));
2423 memcpy(tmp_buf, buf, count);
2424 tmp_buf[count - 1] = '\0';
2425 tmp = tmp_buf;
2426
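	/* the policy level is expected to be exactly one integer token */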
2427 tmp = skip_spaces(tmp);
2428 while ((param = strsep(&tmp, delimiter))) {
2429 if (!strlen(param)) {
2430 tmp = skip_spaces(tmp);
2431 continue;
2432 }
2433 ret = kstrtol(param, 0, &val);
2434 if (ret)
2435 return -EINVAL;
2436 num_params++;
2437 if (num_params > 1)
2438 return -EINVAL;
2439 }
2440
2441 if (num_params != 1)
2442 return -EINVAL;
2443
2444 policy_attr =
2445 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);
2446
2447 ret = amdgpu_pm_get_access(adev);
2448 if (ret < 0)
2449 return ret;
2450
2451 ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);
2452
2453 amdgpu_pm_put_access(adev);
2454
2455 if (ret)
2456 return ret;
2457
2458 return count;
2459 }
2460
2461 #define AMDGPU_PM_POLICY_ATTR(_name, _id) \
2462 static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \
2463 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
2464 amdgpu_set_pm_policy_attr), \
2465 .id = PP_PM_POLICY_##_id, \
2466 };
2467
2468 #define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr
2469
2470 AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
2471 AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)
2472
2473 static struct attribute *pm_policy_attrs[] = {
2474 &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
2475 &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
2476 NULL
2477 };
2478
2479 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
2480 struct attribute *attr, int n)
2481 {
2482 struct device *dev = kobj_to_dev(kobj);
2483 struct drm_device *ddev = dev_get_drvdata(dev);
2484 struct amdgpu_device *adev = drm_to_adev(ddev);
2485 struct amdgpu_pm_policy_attr *policy_attr;
2486
2487 policy_attr =
2488 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);
2489
2490 if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
2491 -ENOENT)
2492 return 0;
2493
2494 return attr->mode;
2495 }
2496
2497 const struct attribute_group amdgpu_pm_policy_attr_group = {
2498 .name = "pm_policy",
2499 .attrs = pm_policy_attrs,
2500 .is_visible = amdgpu_pm_policy_attr_visible,
2501 };
2502
2503 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2504 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2505 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2506 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2507 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2508 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2509 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
2510 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2511 .attr_update = pp_dpm_clk_default_attr_update),
2512 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2513 .attr_update = pp_dpm_clk_default_attr_update),
2514 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2515 .attr_update = pp_dpm_clk_default_attr_update),
2516 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2517 .attr_update = pp_dpm_clk_default_attr_update),
2518 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2519 .attr_update = pp_dpm_clk_default_attr_update),
2520 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2521 .attr_update = pp_dpm_clk_default_attr_update),
2522 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2523 .attr_update = pp_dpm_clk_default_attr_update),
2524 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2525 .attr_update = pp_dpm_clk_default_attr_update),
2526 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2527 .attr_update = pp_dpm_dcefclk_attr_update),
2528 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
2529 .attr_update = pp_dpm_clk_default_attr_update),
2530 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2531 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2532 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2533 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
2534 .attr_update = pp_od_clk_voltage_attr_update),
2535 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2536 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2537 AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2538 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2539 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2540 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2541 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2542 AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2543 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2544 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
2545 .attr_update = ss_power_attr_update),
2546 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
2547 .attr_update = ss_power_attr_update),
2548 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
2549 .attr_update = ss_bias_attr_update),
2550 AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
2551 .attr_update = amdgpu_pm_metrics_attr_update),
2552 };
2553
2554 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2555 uint32_t mask, enum amdgpu_device_attr_states *states)
2556 {
2557 struct device_attribute *dev_attr = &attr->dev_attr;
2558 enum amdgpu_device_attr_id attr_id = attr->attr_id;
2559 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
2560
2561 if (!(attr->flags & mask)) {
2562 *states = ATTR_STATE_UNSUPPORTED;
2563 return 0;
2564 }
2565
2566 if (DEVICE_ATTR_IS(mem_busy_percent)) {
2567 if ((adev->flags & AMD_IS_APU &&
2568 gc_ver != IP_VERSION(9, 4, 3)) ||
2569 gc_ver == IP_VERSION(9, 0, 1))
2570 *states = ATTR_STATE_UNSUPPORTED;
2571 } else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
2572 if (!(gc_ver == IP_VERSION(9, 3, 0) ||
2573 gc_ver == IP_VERSION(10, 3, 1) ||
2574 gc_ver == IP_VERSION(10, 3, 3) ||
2575 gc_ver == IP_VERSION(10, 3, 6) ||
2576 gc_ver == IP_VERSION(10, 3, 7) ||
2577 gc_ver == IP_VERSION(11, 0, 0) ||
2578 gc_ver == IP_VERSION(11, 0, 1) ||
2579 gc_ver == IP_VERSION(11, 0, 2) ||
2580 gc_ver == IP_VERSION(11, 0, 3) ||
2581 gc_ver == IP_VERSION(11, 0, 4) ||
2582 gc_ver == IP_VERSION(11, 5, 0) ||
2583 gc_ver == IP_VERSION(11, 5, 1) ||
2584 gc_ver == IP_VERSION(11, 5, 2) ||
2585 gc_ver == IP_VERSION(11, 5, 3) ||
2586 gc_ver == IP_VERSION(12, 0, 0) ||
2587 gc_ver == IP_VERSION(12, 0, 1)))
2588 *states = ATTR_STATE_UNSUPPORTED;
2589 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2590 /* PCIe Perf counters won't work on APU nodes */
2591 if (adev->flags & AMD_IS_APU ||
2592 !adev->asic_funcs->get_pcie_usage)
2593 *states = ATTR_STATE_UNSUPPORTED;
2594 } else if (DEVICE_ATTR_IS(unique_id)) {
2595 switch (gc_ver) {
2596 case IP_VERSION(9, 0, 1):
2597 case IP_VERSION(9, 4, 0):
2598 case IP_VERSION(9, 4, 1):
2599 case IP_VERSION(9, 4, 2):
2600 case IP_VERSION(9, 4, 3):
2601 case IP_VERSION(9, 4, 4):
2602 case IP_VERSION(9, 5, 0):
2603 case IP_VERSION(10, 3, 0):
2604 case IP_VERSION(11, 0, 0):
2605 case IP_VERSION(11, 0, 1):
2606 case IP_VERSION(11, 0, 2):
2607 case IP_VERSION(11, 0, 3):
2608 case IP_VERSION(12, 0, 0):
2609 case IP_VERSION(12, 0, 1):
2610 *states = ATTR_STATE_SUPPORTED;
2611 break;
2612 default:
2613 *states = ATTR_STATE_UNSUPPORTED;
2614 }
2615 } else if (DEVICE_ATTR_IS(pp_features)) {
2616 if ((adev->flags & AMD_IS_APU &&
2617 gc_ver != IP_VERSION(9, 4, 3)) ||
2618 gc_ver < IP_VERSION(9, 0, 0))
2619 *states = ATTR_STATE_UNSUPPORTED;
2620 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2621 if (gc_ver < IP_VERSION(9, 1, 0))
2622 *states = ATTR_STATE_UNSUPPORTED;
2623 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
2624 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
2625 *states = ATTR_STATE_UNSUPPORTED;
2626 else if ((gc_ver == IP_VERSION(10, 3, 0) ||
2627 gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
2628 *states = ATTR_STATE_UNSUPPORTED;
2629 } else if (DEVICE_ATTR_IS(pp_mclk_od)) {
2630 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
2631 *states = ATTR_STATE_UNSUPPORTED;
2632 } else if (DEVICE_ATTR_IS(pp_sclk_od)) {
2633 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
2634 *states = ATTR_STATE_UNSUPPORTED;
2635 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
2636 u32 limit;
2637
2638 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
2639 -EOPNOTSUPP)
2640 *states = ATTR_STATE_UNSUPPORTED;
2641 } else if (DEVICE_ATTR_IS(pp_table)) {
2642 int ret;
2643 char *tmp = NULL;
2644
2645 ret = amdgpu_dpm_get_pp_table(adev, &tmp);
2646 if (ret == -EOPNOTSUPP || !tmp)
2647 *states = ATTR_STATE_UNSUPPORTED;
2648 else
2649 *states = ATTR_STATE_SUPPORTED;
2650 }
2651
2652 switch (gc_ver) {
2653 case IP_VERSION(10, 3, 0):
2654 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
2655 amdgpu_sriov_vf(adev)) {
2656 dev_attr->attr.mode &= ~0222;
2657 dev_attr->store = NULL;
2658 }
2659 break;
2660 default:
2661 break;
2662 }
2663
2664 return 0;
2665 }
2666
2667
2668 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2669 struct amdgpu_device_attr *attr,
2670 uint32_t mask, struct list_head *attr_list)
2671 {
2672 int ret = 0;
2673 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2674 struct amdgpu_device_attr_entry *attr_entry;
2675 struct device_attribute *dev_attr;
2676 const char *name;
2677
2678 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2679 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2680
2681 if (!attr)
2682 return -EINVAL;
2683
2684 dev_attr = &attr->dev_attr;
2685 name = dev_attr->attr.name;
2686
2687 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2688
2689 ret = attr_update(adev, attr, mask, &attr_states);
2690 if (ret) {
2691 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2692 name, ret);
2693 return ret;
2694 }
2695
2696 if (attr_states == ATTR_STATE_UNSUPPORTED)
2697 return 0;
2698
2699 ret = device_create_file(adev->dev, dev_attr);
2700 if (ret) {
2701 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2702 name, ret);
2703 }
2704
2705 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2706 if (!attr_entry)
2707 return -ENOMEM;
2708
2709 attr_entry->attr = attr;
2710 INIT_LIST_HEAD(&attr_entry->entry);
2711
2712 list_add_tail(&attr_entry->entry, attr_list);
2713
2714 return ret;
2715 }
2716
2717 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2718 {
2719 struct device_attribute *dev_attr = &attr->dev_attr;
2720
2721 device_remove_file(adev->dev, dev_attr);
2722 }
2723
2724 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2725 struct list_head *attr_list);
2726
2727 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2728 struct amdgpu_device_attr *attrs,
2729 uint32_t counts,
2730 uint32_t mask,
2731 struct list_head *attr_list)
2732 {
2733 int ret = 0;
2734 uint32_t i = 0;
2735
2736 for (i = 0; i < counts; i++) {
2737 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2738 if (ret)
2739 goto failed;
2740 }
2741
2742 return 0;
2743
2744 failed:
2745 amdgpu_device_attr_remove_groups(adev, attr_list);
2746
2747 return ret;
2748 }
2749
2750 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2751 struct list_head *attr_list)
2752 {
2753 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2754
2755 if (list_empty(attr_list))
2756 		return;
2757
2758 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2759 amdgpu_device_attr_remove(adev, entry->attr);
2760 list_del(&entry->entry);
2761 kfree(entry);
2762 }
2763 }
2764
2765 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2766 struct device_attribute *attr,
2767 char *buf)
2768 {
2769 struct amdgpu_device *adev = dev_get_drvdata(dev);
2770 int channel = to_sensor_dev_attr(attr)->index;
2771 int r, temp = 0;
2772
2773 if (channel >= PP_TEMP_MAX)
2774 return -EINVAL;
2775
2776 switch (channel) {
2777 case PP_TEMP_JUNCTION:
2778 /* get current junction temperature */
2779 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2780 (void *)&temp);
2781 break;
2782 case PP_TEMP_EDGE:
2783 /* get current edge temperature */
2784 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2785 (void *)&temp);
2786 break;
2787 case PP_TEMP_MEM:
2788 /* get current memory temperature */
2789 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2790 (void *)&temp);
2791 break;
2792 default:
2793 r = -EINVAL;
2794 break;
2795 }
2796
2797 if (r)
2798 return r;
2799
2800 return sysfs_emit(buf, "%d\n", temp);
2801 }
2802
2803 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2804 struct device_attribute *attr,
2805 char *buf)
2806 {
2807 struct amdgpu_device *adev = dev_get_drvdata(dev);
2808 int hyst = to_sensor_dev_attr(attr)->index;
2809 int temp;
2810
2811 if (hyst)
2812 temp = adev->pm.dpm.thermal.min_temp;
2813 else
2814 temp = adev->pm.dpm.thermal.max_temp;
2815
2816 return sysfs_emit(buf, "%d\n", temp);
2817 }
2818
2819 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2820 struct device_attribute *attr,
2821 char *buf)
2822 {
2823 struct amdgpu_device *adev = dev_get_drvdata(dev);
2824 int hyst = to_sensor_dev_attr(attr)->index;
2825 int temp;
2826
2827 if (hyst)
2828 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2829 else
2830 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2831
2832 return sysfs_emit(buf, "%d\n", temp);
2833 }
2834
2835 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2836 struct device_attribute *attr,
2837 char *buf)
2838 {
2839 struct amdgpu_device *adev = dev_get_drvdata(dev);
2840 int hyst = to_sensor_dev_attr(attr)->index;
2841 int temp;
2842
2843 if (hyst)
2844 temp = adev->pm.dpm.thermal.min_mem_temp;
2845 else
2846 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2847
2848 return sysfs_emit(buf, "%d\n", temp);
2849 }
2850
2851 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2852 struct device_attribute *attr,
2853 char *buf)
2854 {
2855 int channel = to_sensor_dev_attr(attr)->index;
2856
2857 if (channel >= PP_TEMP_MAX)
2858 return -EINVAL;
2859
2860 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2861 }
2862
2863 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2864 struct device_attribute *attr,
2865 char *buf)
2866 {
2867 struct amdgpu_device *adev = dev_get_drvdata(dev);
2868 int channel = to_sensor_dev_attr(attr)->index;
2869 int temp = 0;
2870
2871 if (channel >= PP_TEMP_MAX)
2872 return -EINVAL;
2873
2874 switch (channel) {
2875 case PP_TEMP_JUNCTION:
2876 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2877 break;
2878 case PP_TEMP_EDGE:
2879 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2880 break;
2881 case PP_TEMP_MEM:
2882 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2883 break;
2884 }
2885
2886 return sysfs_emit(buf, "%d\n", temp);
2887 }
2888
2889 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2890 struct device_attribute *attr,
2891 char *buf)
2892 {
2893 struct amdgpu_device *adev = dev_get_drvdata(dev);
2894 u32 pwm_mode = 0;
2895 int ret;
2896
2897 ret = amdgpu_pm_get_access_if_active(adev);
2898 if (ret)
2899 return ret;
2900
2901 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2902
2903 amdgpu_pm_put_access(adev);
2904
2905 if (ret)
2906 return -EINVAL;
2907
2908 return sysfs_emit(buf, "%u\n", pwm_mode);
2909 }
2910
2911 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2912 struct device_attribute *attr,
2913 const char *buf,
2914 size_t count)
2915 {
2916 struct amdgpu_device *adev = dev_get_drvdata(dev);
2917 int err, ret;
2918 u32 pwm_mode;
2919 int value;
2920
2921 err = kstrtoint(buf, 10, &value);
2922 if (err)
2923 return err;
2924
2925 if (value == 0)
2926 pwm_mode = AMD_FAN_CTRL_NONE;
2927 else if (value == 1)
2928 pwm_mode = AMD_FAN_CTRL_MANUAL;
2929 else if (value == 2)
2930 pwm_mode = AMD_FAN_CTRL_AUTO;
2931 else
2932 return -EINVAL;
2933
2934 ret = amdgpu_pm_get_access(adev);
2935 if (ret < 0)
2936 return ret;
2937
2938 ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2939
2940 amdgpu_pm_put_access(adev);
2941
2942 if (ret)
2943 return -EINVAL;
2944
2945 return count;
2946 }
2947
2948 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2949 struct device_attribute *attr,
2950 char *buf)
2951 {
2952 return sysfs_emit(buf, "%i\n", 0);
2953 }
2954
2955 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2956 struct device_attribute *attr,
2957 char *buf)
2958 {
2959 return sysfs_emit(buf, "%i\n", 255);
2960 }
2961
2962 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2963 struct device_attribute *attr,
2964 const char *buf, size_t count)
2965 {
2966 struct amdgpu_device *adev = dev_get_drvdata(dev);
2967 int err;
2968 u32 value;
2969 u32 pwm_mode;
2970
2971 err = kstrtou32(buf, 10, &value);
2972 if (err)
2973 return err;
2974
2975 err = amdgpu_pm_get_access(adev);
2976 if (err < 0)
2977 return err;
2978
2979 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2980 if (err)
2981 goto out;
2982
2983 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2984 pr_info("manual fan speed control should be enabled first\n");
2985 err = -EINVAL;
2986 goto out;
2987 }
2988
2989 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2990
2991 out:
2992 amdgpu_pm_put_access(adev);
2993
2994 if (err)
2995 return err;
2996
2997 return count;
2998 }
2999
3000 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
3001 struct device_attribute *attr,
3002 char *buf)
3003 {
3004 struct amdgpu_device *adev = dev_get_drvdata(dev);
3005 int err;
3006 u32 speed = 0;
3007
3008 err = amdgpu_pm_get_access_if_active(adev);
3009 if (err)
3010 return err;
3011
3012 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
3013
3014 amdgpu_pm_put_access(adev);
3015
3016 if (err)
3017 return err;
3018
3019 return sysfs_emit(buf, "%i\n", speed);
3020 }
3021
3022 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
3023 struct device_attribute *attr,
3024 char *buf)
3025 {
3026 struct amdgpu_device *adev = dev_get_drvdata(dev);
3027 int err;
3028 u32 speed = 0;
3029
3030 err = amdgpu_pm_get_access_if_active(adev);
3031 if (err)
3032 return err;
3033
3034 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
3035
3036 amdgpu_pm_put_access(adev);
3037
3038 if (err)
3039 return err;
3040
3041 return sysfs_emit(buf, "%i\n", speed);
3042 }
3043
3044 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
3045 struct device_attribute *attr,
3046 char *buf)
3047 {
3048 struct amdgpu_device *adev = dev_get_drvdata(dev);
3049 u32 min_rpm = 0;
3050 int r;
3051
3052 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
3053 (void *)&min_rpm);
3054
3055 if (r)
3056 return r;
3057
3058 return sysfs_emit(buf, "%d\n", min_rpm);
3059 }
3060
3061 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
3062 struct device_attribute *attr,
3063 char *buf)
3064 {
3065 struct amdgpu_device *adev = dev_get_drvdata(dev);
3066 u32 max_rpm = 0;
3067 int r;
3068
3069 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
3070 (void *)&max_rpm);
3071
3072 if (r)
3073 return r;
3074
3075 return sysfs_emit(buf, "%d\n", max_rpm);
3076 }
3077
3078 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
3079 struct device_attribute *attr,
3080 char *buf)
3081 {
3082 struct amdgpu_device *adev = dev_get_drvdata(dev);
3083 int err;
3084 u32 rpm = 0;
3085
3086 err = amdgpu_pm_get_access_if_active(adev);
3087 if (err)
3088 return err;
3089
3090 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
3091
3092 amdgpu_pm_put_access(adev);
3093
3094 if (err)
3095 return err;
3096
3097 return sysfs_emit(buf, "%i\n", rpm);
3098 }
3099
3100 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
3101 struct device_attribute *attr,
3102 const char *buf, size_t count)
3103 {
3104 struct amdgpu_device *adev = dev_get_drvdata(dev);
3105 int err;
3106 u32 value;
3107 u32 pwm_mode;
3108
3109 err = kstrtou32(buf, 10, &value);
3110 if (err)
3111 return err;
3112
3113 err = amdgpu_pm_get_access(adev);
3114 if (err < 0)
3115 return err;
3116
3117 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3118 if (err)
3119 goto out;
3120
3121 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
3122 err = -ENODATA;
3123 goto out;
3124 }
3125
3126 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
3127
3128 out:
3129 amdgpu_pm_put_access(adev);
3130
3131 if (err)
3132 return err;
3133
3134 return count;
3135 }
3136
3137 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
3138 struct device_attribute *attr,
3139 char *buf)
3140 {
3141 struct amdgpu_device *adev = dev_get_drvdata(dev);
3142 u32 pwm_mode = 0;
3143 int ret;
3144
3145 ret = amdgpu_pm_get_access_if_active(adev);
3146 if (ret)
3147 return ret;
3148
3149 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
3150
3151 amdgpu_pm_put_access(adev);
3152
3153 if (ret)
3154 return -EINVAL;
3155
3156 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
3157 }
3158
3159 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
3160 struct device_attribute *attr,
3161 const char *buf,
3162 size_t count)
3163 {
3164 struct amdgpu_device *adev = dev_get_drvdata(dev);
3165 int err;
3166 int value;
3167 u32 pwm_mode;
3168
3169 err = kstrtoint(buf, 10, &value);
3170 if (err)
3171 return err;
3172
3173 if (value == 0)
3174 pwm_mode = AMD_FAN_CTRL_AUTO;
3175 else if (value == 1)
3176 pwm_mode = AMD_FAN_CTRL_MANUAL;
3177 else
3178 return -EINVAL;
3179
3180 err = amdgpu_pm_get_access(adev);
3181 if (err < 0)
3182 return err;
3183
3184 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
3185
3186 amdgpu_pm_put_access(adev);
3187
3188 if (err)
3189 return -EINVAL;
3190
3191 return count;
3192 }
3193
3194 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
3195 struct device_attribute *attr,
3196 char *buf)
3197 {
3198 struct amdgpu_device *adev = dev_get_drvdata(dev);
3199 u32 vddgfx;
3200 int r;
3201
3202 /* get the voltage */
3203 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
3204 (void *)&vddgfx);
3205 if (r)
3206 return r;
3207
3208 return sysfs_emit(buf, "%d\n", vddgfx);
3209 }
3210
3211 static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
3212 struct device_attribute *attr,
3213 char *buf)
3214 {
3215 struct amdgpu_device *adev = dev_get_drvdata(dev);
3216 u32 vddboard;
3217 int r;
3218
3219 /* get the voltage */
3220 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3221 (void *)&vddboard);
3222 if (r)
3223 return r;
3224
3225 return sysfs_emit(buf, "%d\n", vddboard);
3226 }
3227
3228 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
3229 struct device_attribute *attr,
3230 char *buf)
3231 {
3232 return sysfs_emit(buf, "vddgfx\n");
3233 }
3234
3235 static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
3236 struct device_attribute *attr,
3237 char *buf)
3238 {
3239 return sysfs_emit(buf, "vddboard\n");
3240 }
3241 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
3242 struct device_attribute *attr,
3243 char *buf)
3244 {
3245 struct amdgpu_device *adev = dev_get_drvdata(dev);
3246 u32 vddnb;
3247 int r;
3248
3249 /* only APUs have vddnb */
3250 if (!(adev->flags & AMD_IS_APU))
3251 return -EINVAL;
3252
3253 /* get the voltage */
3254 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
3255 (void *)&vddnb);
3256 if (r)
3257 return r;
3258
3259 return sysfs_emit(buf, "%d\n", vddnb);
3260 }
3261
3262 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3263 struct device_attribute *attr,
3264 char *buf)
3265 {
3266 return sysfs_emit(buf, "vddnb\n");
3267 }
3268
3269 static int amdgpu_hwmon_get_power(struct device *dev,
3270 enum amd_pp_sensors sensor)
3271 {
3272 struct amdgpu_device *adev = dev_get_drvdata(dev);
3273 unsigned int uw;
3274 u32 query = 0;
3275 int r;
3276
3277 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
3278 if (r)
3279 return r;
3280
3281 /* convert to microwatts */
3282 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3283
3284 return uw;
3285 }
3286
3287 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3288 struct device_attribute *attr,
3289 char *buf)
3290 {
3291 ssize_t val;
3292
3293 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
3294 if (val < 0)
3295 return val;
3296
3297 return sysfs_emit(buf, "%zd\n", val);
3298 }
3299
3300 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
3301 struct device_attribute *attr,
3302 char *buf)
3303 {
3304 ssize_t val;
3305
3306 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
3307 if (val < 0)
3308 return val;
3309
3310 return sysfs_emit(buf, "%zd\n", val);
3311 }
3312
3313 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
3314 struct device_attribute *attr,
3315 char *buf,
3316 enum pp_power_limit_level pp_limit_level)
3317 {
3318 struct amdgpu_device *adev = dev_get_drvdata(dev);
3319 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
3320 uint32_t limit;
3321 ssize_t size;
3322 int r;
3323
3324 r = amdgpu_pm_get_access_if_active(adev);
3325 if (r)
3326 return r;
3327
3328 r = amdgpu_dpm_get_power_limit(adev, &limit,
3329 pp_limit_level, power_type);
3330
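	/* dpm reports the limit in watts; hwmon expects microwatts */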
3331 if (!r)
3332 size = sysfs_emit(buf, "%u\n", limit * 1000000);
3333 else
3334 size = sysfs_emit(buf, "\n");
3335
3336 amdgpu_pm_put_access(adev);
3337
3338 return size;
3339 }
3340
3341 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3342 struct device_attribute *attr,
3343 char *buf)
3344 {
3345 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
3346 }
3347
3348 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3349 struct device_attribute *attr,
3350 char *buf)
3351 {
3352 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3353
3354 }
3355
3356 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3357 struct device_attribute *attr,
3358 char *buf)
3359 {
3360 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3361
3362 }
3363
3364 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3365 struct device_attribute *attr,
3366 char *buf)
3367 {
3368 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3369
3370 }
3371
3372 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3373 struct device_attribute *attr,
3374 char *buf)
3375 {
3376 struct amdgpu_device *adev = dev_get_drvdata(dev);
3377 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3378
3379 if (gc_ver == IP_VERSION(10, 3, 1))
3380 return sysfs_emit(buf, "%s\n",
3381 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3382 "fastPPT" : "slowPPT");
3383 else
3384 return sysfs_emit(buf, "%s\n",
3385 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3386 "PPT1" : "PPT");
3387 }
3388
3389 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3390 struct device_attribute *attr,
3391 const char *buf,
3392 size_t count)
3393 {
3394 struct amdgpu_device *adev = dev_get_drvdata(dev);
3395 int limit_type = to_sensor_dev_attr(attr)->index;
3396 int err;
3397 u32 value;
3398
3399 err = kstrtou32(buf, 10, &value);
3400 if (err)
3401 return err;
3402
3403 value = value / 1000000; /* convert to Watt */
3404
3405 err = amdgpu_pm_get_access(adev);
3406 if (err < 0)
3407 return err;
3408
3409 err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
3410
3411 amdgpu_pm_put_access(adev);
3412
3413 if (err)
3414 return err;
3415
3416 return count;
3417 }
3418
3419 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3420 struct device_attribute *attr,
3421 char *buf)
3422 {
3423 struct amdgpu_device *adev = dev_get_drvdata(dev);
3424 uint32_t sclk;
3425 int r;
3426
3427 /* get the sclk */
3428 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3429 (void *)&sclk);
3430 if (r)
3431 return r;
3432
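	/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */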
3433 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3434 }
3435
3436 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3437 struct device_attribute *attr,
3438 char *buf)
3439 {
3440 return sysfs_emit(buf, "sclk\n");
3441 }
3442
3443 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3444 struct device_attribute *attr,
3445 char *buf)
3446 {
3447 struct amdgpu_device *adev = dev_get_drvdata(dev);
3448 uint32_t mclk;
3449 int r;
3450
3451 /* get the mclk */
3452 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3453 (void *)&mclk);
3454 if (r)
3455 return r;
3456
3457 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3458 }
3459
3460 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3461 struct device_attribute *attr,
3462 char *buf)
3463 {
3464 return sysfs_emit(buf, "mclk\n");
3465 }
3466
3467 /**
3468 * DOC: hwmon
3469 *
3470 * The amdgpu driver exposes the following sensor interfaces:
3471 *
3472 * - GPU temperature (via the on-die sensor)
3473 *
3474 * - GPU voltage
3475 *
3476 * - Northbridge voltage (APUs only)
3477 *
3478 * - GPU power
3479 *
3480 * - GPU fan
3481 *
3482 * - GPU gfx/compute engine clock
3483 *
3484 * - GPU memory clock (dGPU only)
3485 *
3486 * hwmon interfaces for GPU temperature:
3487 *
3488 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3489 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3490 *
3491 * - temp[1-3]_label: temperature channel label
3492 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3493 *
3494 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3495 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3496 *
3497 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3498 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3499 *
3500 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3501 * - these are supported on SOC15 dGPUs only
3502 *
3503 * hwmon interfaces for GPU voltage:
3504 *
3505 * - in0_input: the voltage on the GPU in millivolts
3506 *
3507 * - in1_input: the voltage on the Northbridge in millivolts
3508 *
3509 * hwmon interfaces for GPU power:
3510 *
3511 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
3512 *
3513 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
3514 *
3515 * - power1_cap_min: minimum cap supported in microWatts
3516 *
3517 * - power1_cap_max: maximum cap supported in microWatts
3518 *
3519 * - power1_cap: selected power cap in microWatts
3520 *
3521 * hwmon interfaces for GPU fan:
3522 *
3523 * - pwm1: pulse width modulation fan level (0-255)
3524 *
3525 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3526 *
3527 * - pwm1_min: pulse width modulation fan control minimum level (0)
3528 *
3529 * - pwm1_max: pulse width modulation fan control maximum level (255)
3530 *
3531 * - fan1_min: the minimum fan speed in revolutions/min (RPM)
3532 *
3533 * - fan1_max: the maximum fan speed in revolutions/min (RPM)
3534 *
3535 * - fan1_input: fan speed in RPM
3536 *
3537 * - fan[1-\*]_target: the desired fan speed in revolutions/min (RPM)
3538 *
3539 * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
3540 *
3541 * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces at the same time.
3542 * Doing so will cause the former setting to be overridden.
3543 *
3544 * hwmon interfaces for GPU clocks:
3545 *
3546 * - freq1_input: the gfx/compute clock in hertz
3547 *
3548 * - freq2_input: the memory clock in hertz
3549 *
3550 * You can use hwmon tools like sensors to view this information on your system.
3551 *
3552 */
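/*
 * Illustrative sketch of the interfaces documented above (the hwmonX index
 * and the values shown are system dependent, not guaranteed output):
 *
 *   $ cat /sys/class/hwmon/hwmonX/freq1_input
 *   800000000
 *   $ cat /sys/class/hwmon/hwmonX/power1_cap
 *   250000000
 *
 * freq1_input is reported in hertz and power1_cap in microwatts, matching
 * the units listed above.
 */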
3553
3554 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3555 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3556 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3557 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3558 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3559 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3560 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3561 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3562 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3563 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3564 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3565 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3566 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3567 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3568 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3569 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3570 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3571 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3572 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3573 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3574 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3575 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3576 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3577 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3578 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3579 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3580 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3581 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3582 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
3583 static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
3584 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3585 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
3586 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3587 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3588 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3589 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3590 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3591 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3592 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3593 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3594 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3595 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3596 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3597 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3598 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3599 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3600
3601 static struct attribute *hwmon_attributes[] = {
3602 &sensor_dev_attr_temp1_input.dev_attr.attr,
3603 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3604 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3605 &sensor_dev_attr_temp2_input.dev_attr.attr,
3606 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3607 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3608 &sensor_dev_attr_temp3_input.dev_attr.attr,
3609 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3610 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3611 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3612 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3613 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3614 &sensor_dev_attr_temp1_label.dev_attr.attr,
3615 &sensor_dev_attr_temp2_label.dev_attr.attr,
3616 &sensor_dev_attr_temp3_label.dev_attr.attr,
3617 &sensor_dev_attr_pwm1.dev_attr.attr,
3618 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3619 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3620 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3621 &sensor_dev_attr_fan1_input.dev_attr.attr,
3622 &sensor_dev_attr_fan1_min.dev_attr.attr,
3623 &sensor_dev_attr_fan1_max.dev_attr.attr,
3624 &sensor_dev_attr_fan1_target.dev_attr.attr,
3625 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3626 &sensor_dev_attr_in0_input.dev_attr.attr,
3627 &sensor_dev_attr_in0_label.dev_attr.attr,
3628 &sensor_dev_attr_in1_input.dev_attr.attr,
3629 &sensor_dev_attr_in1_label.dev_attr.attr,
3630 &sensor_dev_attr_in2_input.dev_attr.attr,
3631 &sensor_dev_attr_in2_label.dev_attr.attr,
3632 &sensor_dev_attr_power1_average.dev_attr.attr,
3633 &sensor_dev_attr_power1_input.dev_attr.attr,
3634 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3635 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3636 &sensor_dev_attr_power1_cap.dev_attr.attr,
3637 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3638 &sensor_dev_attr_power1_label.dev_attr.attr,
3639 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3640 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3641 &sensor_dev_attr_power2_cap.dev_attr.attr,
3642 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3643 &sensor_dev_attr_power2_label.dev_attr.attr,
3644 &sensor_dev_attr_freq1_input.dev_attr.attr,
3645 &sensor_dev_attr_freq1_label.dev_attr.attr,
3646 &sensor_dev_attr_freq2_input.dev_attr.attr,
3647 &sensor_dev_attr_freq2_label.dev_attr.attr,
3648 NULL
3649 };
3650
3651 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3652 struct attribute *attr, int index)
3653 {
3654 struct device *dev = kobj_to_dev(kobj);
3655 struct amdgpu_device *adev = dev_get_drvdata(dev);
3656 umode_t effective_mode = attr->mode;
3657 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
3658 uint32_t tmp;
3659
3660 /* under pp one vf mode, management of hwmon attributes is not supported */
3661 if (amdgpu_sriov_is_pp_one_vf(adev))
3662 effective_mode &= ~S_IWUSR;
3663
3664 /* Skip fan attributes if fan is not present */
3665 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3666 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3667 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3668 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3669 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3670 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3671 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3672 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3673 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3674 return 0;
3675
3676 /* Skip fan attributes on APU */
3677 if ((adev->flags & AMD_IS_APU) &&
3678 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3679 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3680 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3681 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3682 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3683 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3684 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3685 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3686 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3687 return 0;
3688
3689 /* Skip crit temp on APU */
3690 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3691 (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
3692 gc_ver == IP_VERSION(9, 5, 0))) &&
3693 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3694 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3695 return 0;
3696
3697 /* Skip limit attributes if DPM is not enabled */
3698 if (!adev->pm.dpm_enabled &&
3699 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3700 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3701 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3702 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3703 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3704 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3705 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3706 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3707 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3708 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3709 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3710 return 0;
3711
3712 /* mask fan attributes if we have no bindings for this asic to expose */
3713 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3714 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3715 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3716 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3717 effective_mode &= ~S_IRUGO;
3718
3719 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3720 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3721 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3722 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3723 effective_mode &= ~S_IWUSR;
3724
3725 /* not implemented yet for APUs other than GC 10.3.1 (Vangogh), 9.4.3 and 9.4.4 */
3726 if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3727 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3728 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3729 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
3730 if (adev->family == AMDGPU_FAMILY_SI ||
3731 ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
3732 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
3733 (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
3734 return 0;
3735 }
3736
3737 if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
3738 amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
3739 effective_mode |= S_IWUSR;
3740
3741 /* not implemented yet for APUs older than GC 9.3.0 (Renoir) */
3742 if (((adev->family == AMDGPU_FAMILY_SI) ||
3743 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3744 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3745 return 0;
3746
3747 /* not all products support both average and instantaneous */
3748 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
3749 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
3750 (void *)&tmp) == -EOPNOTSUPP)
3751 return 0;
3752 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
3753 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
3754 (void *)&tmp) == -EOPNOTSUPP)
3755 return 0;
3756
3757 /* hide max/min values if we can't both query and manage the fan */
3758 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3759 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3760 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3761 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3762 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3763 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3764 return 0;
3765
3766 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3767 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3768 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3769 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3770 return 0;
3771
3772 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3773 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
3774 (gc_ver == IP_VERSION(9, 4, 3) ||
3775 gc_ver == IP_VERSION(9, 4, 4) ||
3776 gc_ver == IP_VERSION(9, 5, 0))) &&
3777 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3778 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3779 return 0;
3780
3781 /* only APUs other than GC 9.4.3/9.4.4/9.5.0 have vddnb */
3782 if ((!(adev->flags & AMD_IS_APU) ||
3783 (gc_ver == IP_VERSION(9, 4, 3) ||
3784 gc_ver == IP_VERSION(9, 4, 4) ||
3785 gc_ver == IP_VERSION(9, 5, 0))) &&
3786 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3787 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3788 return 0;
3789
3790 /* only a few boards support vddboard */
3791 if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
3792 attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
3793 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
3794 (void *)&tmp) == -EOPNOTSUPP)
3795 return 0;
3796
3797 /* no mclk on APUs other than GC 9.4.3 */
3798 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3799 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3800 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3801 return 0;
3802
3803 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3804 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
3805 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3806 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3807 attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3808 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3809 attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
3810 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
3811 return 0;
3812
3813 /* hotspot temperature for GC 9.4.3/9.4.4/9.5.0 */
3814 if (gc_ver == IP_VERSION(9, 4, 3) ||
3815 gc_ver == IP_VERSION(9, 4, 4) ||
3816 gc_ver == IP_VERSION(9, 5, 0)) {
3817 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3818 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3819 attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
3820 return 0;
3821
3822 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3823 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
3824 return attr->mode;
3825 }
3826
3827 /* only SOC15 dGPUs support hotspot and mem temperatures */
3828 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3829 (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3830 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3831 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3832 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3833 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3834 return 0;
3835
3836 /* only Vangogh has fast PPT limit and power labels */
3837 if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3838 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3839 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3840 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3841 attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
3842 (amdgpu_dpm_get_power_limit(adev, &tmp,
3843 PP_PWR_LIMIT_MAX,
3844 PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
3845 return 0;
3846
3847 return effective_mode;
3848 }
3849
3850 static const struct attribute_group hwmon_attrgroup = {
3851 .attrs = hwmon_attributes,
3852 .is_visible = hwmon_attributes_visible,
3853 };
3854
3855 static const struct attribute_group *hwmon_groups[] = {
3856 &hwmon_attrgroup,
3857 NULL
3858 };
3859
3860 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3861 enum pp_clock_type od_type,
3862 char *buf)
3863 {
3864 int size = 0;
3865 int ret;
3866
3867 ret = amdgpu_pm_get_access_if_active(adev);
3868 if (ret)
3869 return ret;
3870
3871 size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3872 if (size == 0)
3873 size = sysfs_emit(buf, "\n");
3874
3875 amdgpu_pm_put_access(adev);
3876
3877 return size;
3878 }
3879
3880 static int parse_input_od_command_lines(const char *buf,
3881 size_t count,
3882 u32 *type,
3883 long *params,
3884 uint32_t *num_of_params)
3885 {
3886 const char delimiter[3] = {' ', '\n', '\0'};
3887 uint32_t parameter_size = 0;
3888 char buf_cpy[128] = {0};
3889 char *tmp_str, *sub_str;
3890 int ret;
3891
3892 if (count > sizeof(buf_cpy) - 1)
3893 return -EINVAL;
3894
3895 memcpy(buf_cpy, buf, count);
3896 tmp_str = buf_cpy;
3897
3898 /* skip leading spaces */
3899 while (isspace(*tmp_str))
3900 tmp_str++;
3901
3902 switch (*tmp_str) {
3903 case 'c':
3904 *type = PP_OD_COMMIT_DPM_TABLE;
3905 return 0;
3906 case 'r':
3907 params[parameter_size] = *type;
3908 *num_of_params = 1;
3909 *type = PP_OD_RESTORE_DEFAULT_TABLE;
3910 return 0;
3911 default:
3912 break;
3913 }
3914
3915 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
3916 if (strlen(sub_str) == 0)
3917 continue;
3918
3919 ret = kstrtol(sub_str, 0, &params[parameter_size]);
3920 if (ret)
3921 return -EINVAL;
3922 parameter_size++;
3923
3924 if (!tmp_str)
3925 break;
3926
3927 while (isspace(*tmp_str))
3928 tmp_str++;
3929 }
3930
3931 *num_of_params = parameter_size;
3932
3933 return 0;
3934 }
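/*
 * Inputs accepted by the parser above, for reference (the numeric values
 * shown are illustrative only):
 *
 *   "c"        - commit the staged settings (PP_OD_COMMIT_DPM_TABLE)
 *   "r"        - restore the defaults (PP_OD_RESTORE_DEFAULT_TABLE)
 *   "1 65 200" - whitespace-separated numeric parameters, split with
 *                strsep() and parsed with kstrtol() into params[]
 */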
3935
3936 static int
3937 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
3938 enum PP_OD_DPM_TABLE_COMMAND cmd_type,
3939 const char *in_buf,
3940 size_t count)
3941 {
3942 uint32_t parameter_size = 0;
3943 long parameter[64];
3944 int ret;
3945
3946 ret = parse_input_od_command_lines(in_buf,
3947 count,
3948 &cmd_type,
3949 parameter,
3950 &parameter_size);
3951 if (ret)
3952 return ret;
3953
3954 ret = amdgpu_pm_get_access(adev);
3955 if (ret < 0)
3956 return ret;
3957
3958 ret = amdgpu_dpm_odn_edit_dpm_table(adev,
3959 cmd_type,
3960 parameter,
3961 parameter_size);
3962 if (ret)
3963 goto err_out;
3964
3965 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
3966 ret = amdgpu_dpm_dispatch_task(adev,
3967 AMD_PP_TASK_READJUST_POWER_STATE,
3968 NULL);
3969 if (ret)
3970 goto err_out;
3971 }
3972
3973 amdgpu_pm_put_access(adev);
3974
3975 return count;
3976
3977 err_out:
3978 amdgpu_pm_put_access(adev);
3979
3980 return ret;
3981 }
3982
3983 /**
3984 * DOC: fan_curve
3985 *
3986 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
3987 * control curve line.
3988 *
3989 * Reading back the file shows you the current settings (temperature in degrees
3990 * Celsius and fan speed in PWM) applied to every anchor point of the curve line
3991 * and their permitted ranges if changeable.
3992 *
3993 * Writing a string in the format "anchor_point_index temperature
3994 * fan_speed_in_pwm" to the file changes the settings for the specific anchor
3995 * point accordingly.
3996 *
3997 * When you have finished the editing, write "c" (commit) to the file to commit
3998 * your changes.
3999 *
4000 * If you want to reset to the default value, write "r" (reset) to the file to
4001 * reset them.
4002 *
4003 * Two fan control modes are supported: auto and manual. With auto mode,
4004 * the PMFW handles the fan speed control (how the fan speed reacts to the
4005 * ASIC temperature). With manual mode, users can set their own fan curve
4006 * line as described here. Normally the ASIC boots up with auto mode. Any
4007 * setting via this interface will switch the fan control to manual mode
4008 * implicitly.
4009 */
4010 static ssize_t fan_curve_show(struct kobject *kobj,
4011 struct kobj_attribute *attr,
4012 char *buf)
4013 {
4014 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4015 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4016
4017 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
4018 }
4019
4020 static ssize_t fan_curve_store(struct kobject *kobj,
4021 struct kobj_attribute *attr,
4022 const char *buf,
4023 size_t count)
4024 {
4025 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4026 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4027
4028 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4029 PP_OD_EDIT_FAN_CURVE,
4030 buf,
4031 count);
4032 }
4033
4034 static umode_t fan_curve_visible(struct amdgpu_device *adev)
4035 {
4036 umode_t umode = 0000;
4037
4038 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
4039 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4040
4041 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
4042 umode |= S_IWUSR;
4043
4044 return umode;
4045 }
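/*
 * Worked example for the mode arithmetic above (the same pattern is used by
 * all the *_visible() helpers below): with both the RETRIEVE and SET feature
 * bits present, umode = (S_IRUSR | S_IRGRP | S_IROTH) | S_IWUSR
 * = 0444 | 0200 = 0644, i.e. a world-readable, owner-writable file; with
 * neither bit set it stays 0000 and the file is hidden.
 */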
4046
4047 /**
4048 * DOC: acoustic_limit_rpm_threshold
4049 *
4050 * The amdgpu driver provides a sysfs API for checking and adjusting the
4051 * acoustic limit in RPM for fan control.
4052 *
4053 * Reading back the file shows you the current setting and the permitted
4054 * ranges if changeable.
4055 *
4056 * Writing an integer to the file changes the setting accordingly.
4057 *
4058 * When you have finished the editing, write "c" (commit) to the file to commit
4059 * your changes.
4060 *
4061 * If you want to reset to the default value, write "r" (reset) to the file to
4062 * reset them.
4063 *
4064 * This setting works under auto fan control mode only. It adjusts the PMFW's
4065 * cap on the maximum speed in RPM the fan can spin. Setting via this
4066 * interface will switch the fan control to auto mode implicitly.
4067 */
4068 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
4069 struct kobj_attribute *attr,
4070 char *buf)
4071 {
4072 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4073 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4074
4075 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
4076 }
4077
4078 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
4079 struct kobj_attribute *attr,
4080 const char *buf,
4081 size_t count)
4082 {
4083 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4084 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4085
4086 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4087 PP_OD_EDIT_ACOUSTIC_LIMIT,
4088 buf,
4089 count);
4090 }
4091
4092 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
4093 {
4094 umode_t umode = 0000;
4095
4096 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
4097 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4098
4099 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
4100 umode |= S_IWUSR;
4101
4102 return umode;
4103 }
4104
4105 /**
4106 * DOC: acoustic_target_rpm_threshold
4107 *
4108 * The amdgpu driver provides a sysfs API for checking and adjusting the
4109 * acoustic target in RPM for fan control.
4110 *
4111 * Reading back the file shows you the current setting and the permitted
4112 * ranges if changeable.
4113 *
4114 * Writing an integer to the file changes the setting accordingly.
4115 *
4116 * When you have finished the editing, write "c" (commit) to the file to commit
4117 * your changes.
4118 *
4119 * If you want to reset to the default value, write "r" (reset) to the file to
4120 * reset them.
4121 *
4122 * This setting works under auto fan control mode only. It can coexist with
4123 * other settings that also work under auto mode. It adjusts the PMFW's cap
4124 * on the maximum speed in RPM the fan can spin when the ASIC temperature is
4125 * not greater than the target temperature. Setting via this interface will
4126 * switch the fan control to auto mode implicitly.
4127 */
4128 static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
4129 struct kobj_attribute *attr,
4130 char *buf)
4131 {
4132 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4133 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4134
4135 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
4136 }
4137
4138 static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
4139 struct kobj_attribute *attr,
4140 const char *buf,
4141 size_t count)
4142 {
4143 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4144 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4145
4146 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4147 PP_OD_EDIT_ACOUSTIC_TARGET,
4148 buf,
4149 count);
4150 }
4151
4152 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
4153 {
4154 umode_t umode = 0000;
4155
4156 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
4157 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4158
4159 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
4160 umode |= S_IWUSR;
4161
4162 return umode;
4163 }
4164
4165 /**
4166 * DOC: fan_target_temperature
4167 *
4168 * The amdgpu driver provides a sysfs API for checking and adjusting the
4169 * target temperature in degrees Celsius for fan control.
4170 *
4171 * Reading back the file shows you the current setting and the permitted
4172 * ranges if changeable.
4173 *
4174 * Writing an integer to the file changes the setting accordingly.
4175 *
4176 * When you have finished the editing, write "c" (commit) to the file to commit
4177 * your changes.
4178 *
4179 * If you want to reset to the default value, write "r" (reset) to the file to
4180 * reset them.
4181 *
4182 * This setting works under auto fan control mode only. It can coexist with
4183 * other settings that also work under auto mode. Paired with the
4184 * acoustic_target_rpm_threshold setting, it defines the maximum speed in
4185 * RPM the fan can spin when the ASIC temperature is not greater than the
4186 * target temperature. Setting via this interface will switch the fan
4187 * control to auto mode implicitly.
4188 */
4189 static ssize_t fan_target_temperature_show(struct kobject *kobj,
4190 struct kobj_attribute *attr,
4191 char *buf)
4192 {
4193 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4194 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4195
4196 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
4197 }
4198
4199 static ssize_t fan_target_temperature_store(struct kobject *kobj,
4200 struct kobj_attribute *attr,
4201 const char *buf,
4202 size_t count)
4203 {
4204 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4205 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4206
4207 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4208 PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
4209 buf,
4210 count);
4211 }
4212
4213 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
4214 {
4215 umode_t umode = 0000;
4216
4217 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
4218 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4219
4220 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
4221 umode |= S_IWUSR;
4222
4223 return umode;
4224 }
4225
4226 /**
4227 * DOC: fan_minimum_pwm
4228 *
4229 * The amdgpu driver provides a sysfs API for checking and adjusting the
4230 * minimum fan speed in PWM.
4231 *
4232 * Reading back the file shows you the current setting and the permitted
4233 * ranges if changeable.
4234 *
4235 * Writing an integer to the file changes the setting accordingly.
4236 *
4237 * When you have finished the editing, write "c" (commit) to the file to commit
4238 * your changes.
4239 *
4240 * If you want to reset to the default value, write "r" (reset) to the file to
4241 * reset them.
4242 *
4243 * This setting works under auto fan control mode only. It can coexist with
4244 * other settings that also work under auto mode. It adjusts the PMFW's
4245 * floor for the minimum fan speed in PWM the fan should spin at. Setting
4246 * via this interface will switch the fan control to auto mode implicitly.
4247 */
4248 static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
4249 struct kobj_attribute *attr,
4250 char *buf)
4251 {
4252 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4253 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4254
4255 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
4256 }
4257
4258 static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
4259 struct kobj_attribute *attr,
4260 const char *buf,
4261 size_t count)
4262 {
4263 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4264 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4265
4266 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4267 PP_OD_EDIT_FAN_MINIMUM_PWM,
4268 buf,
4269 count);
4270 }
4271
4272 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
4273 {
4274 umode_t umode = 0000;
4275
4276 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
4277 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4278
4279 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
4280 umode |= S_IWUSR;
4281
4282 return umode;
4283 }
4284
4285 /**
4286 * DOC: fan_zero_rpm_enable
4287 *
4288 * The amdgpu driver provides a sysfs API for checking and adjusting the
4289 * zero RPM feature.
4290 *
4291 * Reading back the file shows you the current setting and the permitted
4292 * ranges if changeable.
4293 *
4294 * Writing an integer to the file changes the setting accordingly.
4295 *
4296 * When you have finished the editing, write "c" (commit) to the file to commit
4297 * your changes.
4298 *
4299 * If you want to reset to the default value, write "r" (reset) to the file to
4300 * reset them.
4301 */
4302 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
4303 struct kobj_attribute *attr,
4304 char *buf)
4305 {
4306 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4307 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4308
4309 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
4310 }
4311
4312 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
4313 struct kobj_attribute *attr,
4314 const char *buf,
4315 size_t count)
4316 {
4317 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4318 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4319
4320 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4321 PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
4322 buf,
4323 count);
4324 }
4325
4326 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
4327 {
4328 umode_t umode = 0000;
4329
4330 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
4331 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4332
4333 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
4334 umode |= S_IWUSR;
4335
4336 return umode;
4337 }
4338
4339 /**
4340 * DOC: fan_zero_rpm_stop_temperature
4341 *
4342 * The amdgpu driver provides a sysfs API for checking and adjusting the
4343 * zero RPM stop temperature feature.
4344 *
4345 * Reading back the file shows you the current setting and the permitted
4346 * ranges if changeable.
4347 *
4348 * Writing an integer to the file changes the setting accordingly.
4349 *
4350 * When you have finished the editing, write "c" (commit) to the file to commit
4351 * your changes.
4352 *
4353 * If you want to reset to the default value, write "r" (reset) to the file to
4354 * reset them.
4355 *
4356 * This setting works only if the Zero RPM setting is enabled. It adjusts the
4357 * temperature below which the fan can stop.
4358 */
4359 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
4360 struct kobj_attribute *attr,
4361 char *buf)
4362 {
4363 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4364 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4365
4366 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
4367 }
4368
4369 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
4370 struct kobj_attribute *attr,
4371 const char *buf,
4372 size_t count)
4373 {
4374 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
4375 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;
4376
4377 return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
4378 PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
4379 buf,
4380 count);
4381 }
4382
4383 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
4384 {
4385 umode_t umode = 0000;
4386
4387 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
4388 umode |= S_IRUSR | S_IRGRP | S_IROTH;
4389
4390 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
4391 umode |= S_IWUSR;
4392
4393 return umode;
4394 }
4395
4396 static struct od_feature_set amdgpu_od_set = {
4397 .containers = {
4398 [0] = {
4399 .name = "fan_ctrl",
4400 .sub_feature = {
4401 [0] = {
4402 .name = "fan_curve",
4403 .ops = {
4404 .is_visible = fan_curve_visible,
4405 .show = fan_curve_show,
4406 .store = fan_curve_store,
4407 },
4408 },
4409 [1] = {
4410 .name = "acoustic_limit_rpm_threshold",
4411 .ops = {
4412 .is_visible = acoustic_limit_threshold_visible,
4413 .show = acoustic_limit_threshold_show,
4414 .store = acoustic_limit_threshold_store,
4415 },
4416 },
4417 [2] = {
4418 .name = "acoustic_target_rpm_threshold",
4419 .ops = {
4420 .is_visible = acoustic_target_threshold_visible,
4421 .show = acoustic_target_threshold_show,
4422 .store = acoustic_target_threshold_store,
4423 },
4424 },
4425 [3] = {
4426 .name = "fan_target_temperature",
4427 .ops = {
4428 .is_visible = fan_target_temperature_visible,
4429 .show = fan_target_temperature_show,
4430 .store = fan_target_temperature_store,
4431 },
4432 },
4433 [4] = {
4434 .name = "fan_minimum_pwm",
4435 .ops = {
4436 .is_visible = fan_minimum_pwm_visible,
4437 .show = fan_minimum_pwm_show,
4438 .store = fan_minimum_pwm_store,
4439 },
4440 },
4441 [5] = {
4442 .name = "fan_zero_rpm_enable",
4443 .ops = {
4444 .is_visible = fan_zero_rpm_enable_visible,
4445 .show = fan_zero_rpm_enable_show,
4446 .store = fan_zero_rpm_enable_store,
4447 },
4448 },
4449 [6] = {
4450 .name = "fan_zero_rpm_stop_temperature",
4451 .ops = {
4452 .is_visible = fan_zero_rpm_stop_temp_visible,
4453 .show = fan_zero_rpm_stop_temp_show,
4454 .store = fan_zero_rpm_stop_temp_store,
4455 },
4456 },
4457 },
4458 },
4459 },
4460 };
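/*
 * With every sub-feature above visible, amdgpu_od_set_init() below builds
 * the following sysfs layout (a sketch; entries an ASIC does not support
 * are omitted at runtime):
 *
 *   gpu_od/
 *     fan_ctrl/
 *       fan_curve
 *       acoustic_limit_rpm_threshold
 *       acoustic_target_rpm_threshold
 *       fan_target_temperature
 *       fan_minimum_pwm
 *       fan_zero_rpm_enable
 *       fan_zero_rpm_stop_temperature
 */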
4461
4462 static void od_kobj_release(struct kobject *kobj)
4463 {
4464 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);
4465
4466 kfree(od_kobj);
4467 }
4468
4469 static const struct kobj_type od_ktype = {
4470 .release = od_kobj_release,
4471 .sysfs_ops = &kobj_sysfs_ops,
4472 };
4473
4474 static void amdgpu_od_set_fini(struct amdgpu_device *adev)
4475 {
4476 struct od_kobj *container, *container_next;
4477 struct od_attribute *attribute, *attribute_next;
4478
4479 if (list_empty(&adev->pm.od_kobj_list))
4480 return;
4481
4482 list_for_each_entry_safe(container, container_next,
4483 &adev->pm.od_kobj_list, entry) {
4484 list_del(&container->entry);
4485
4486 list_for_each_entry_safe(attribute, attribute_next,
4487 &container->attribute, entry) {
4488 list_del(&attribute->entry);
4489 sysfs_remove_file(&container->kobj,
4490 &attribute->attribute.attr);
4491 kfree(attribute);
4492 }
4493
4494 kobject_put(&container->kobj);
4495 }
4496 }
4497
4498 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
4499 struct od_feature_ops *feature_ops)
4500 {
4501 umode_t mode;
4502
4503 if (!feature_ops->is_visible)
4504 return false;
4505
4506 /*
4507 * If the feature has neither the user read nor the user write mode
4508 * set, we can assume the feature is not actually supported, and the
4509 * relevant sysfs interface should not be exposed.
4510 */
4511 mode = feature_ops->is_visible(adev);
4512 if (mode & (S_IRUSR | S_IWUSR))
4513 return true;
4514
4515 return false;
4516 }
4517
4518 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
4519 struct od_feature_container *container)
4520 {
4521 int i;
4522
4523 /*
4524 * If there is no valid entry within the container, the container
4525 * is recognized as a self-contained container. A valid entry here
4526 * means one that has a valid name and is visible/supported by
4527 * the ASIC.
4528 */
4529 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
4530 if (container->sub_feature[i].name &&
4531 amdgpu_is_od_feature_supported(adev,
4532 &container->sub_feature[i].ops))
4533 return false;
4534 }
4535
4536 return true;
4537 }
4538
4539 static int amdgpu_od_set_init(struct amdgpu_device *adev)
4540 {
4541 struct od_kobj *top_set, *sub_set;
4542 struct od_attribute *attribute;
4543 struct od_feature_container *container;
4544 struct od_feature_item *feature;
4545 int i, j;
4546 int ret;
4547
4548 /* Set up the top `gpu_od` directory which holds all other OD interfaces */
4549 top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
4550 if (!top_set)
4551 return -ENOMEM;
4552 list_add(&top_set->entry, &adev->pm.od_kobj_list);
4553
4554 ret = kobject_init_and_add(&top_set->kobj,
4555 &od_ktype,
4556 &adev->dev->kobj,
4557 "%s",
4558 "gpu_od");
4559 if (ret)
4560 goto err_out;
4561 INIT_LIST_HEAD(&top_set->attribute);
4562 top_set->priv = adev;
4563
4564 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
4565 container = &amdgpu_od_set.containers[i];
4566
4567 if (!container->name)
4568 continue;
4569
4570 /*
4571 * If there are valid entries within the container, the container
4572 * is presented as a sub directory and all the entries it holds
4573 * are presented as plain files under it.
4574 * If there is no valid entry within the container, the container
4575 * itself is presented as a plain file under the top `gpu_od` directory.
4576 */
4577 if (amdgpu_od_is_self_contained(adev, container)) {
4578 if (!amdgpu_is_od_feature_supported(adev,
4579 &container->ops))
4580 continue;
4581
4582 /*
4583 * The container is presented as a plain file under top `gpu_od`
4584 * directory.
4585 */
4586 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4587 if (!attribute) {
4588 ret = -ENOMEM;
4589 goto err_out;
4590 }
4591 list_add(&attribute->entry, &top_set->attribute);
4592
4593 attribute->attribute.attr.mode =
4594 container->ops.is_visible(adev);
4595 attribute->attribute.attr.name = container->name;
4596 attribute->attribute.show =
4597 container->ops.show;
4598 attribute->attribute.store =
4599 container->ops.store;
4600 ret = sysfs_create_file(&top_set->kobj,
4601 &attribute->attribute.attr);
4602 if (ret)
4603 goto err_out;
4604 } else {
4605 /* The container is presented as a sub directory. */
4606 sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
4607 if (!sub_set) {
4608 ret = -ENOMEM;
4609 goto err_out;
4610 }
4611 list_add(&sub_set->entry, &adev->pm.od_kobj_list);
4612
4613 ret = kobject_init_and_add(&sub_set->kobj,
4614 &od_ktype,
4615 &top_set->kobj,
4616 "%s",
4617 container->name);
4618 if (ret)
4619 goto err_out;
4620 INIT_LIST_HEAD(&sub_set->attribute);
4621 sub_set->priv = adev;
4622
4623 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
4624 feature = &container->sub_feature[j];
4625 if (!feature->name)
4626 continue;
4627
4628 if (!amdgpu_is_od_feature_supported(adev,
4629 &feature->ops))
4630 continue;
4631
4632 /*
4633 * With the container presented as a sub directory, the entry within
4634 * it is presented as a plain file under the sub directory.
4635 */
4636 attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
4637 if (!attribute) {
4638 ret = -ENOMEM;
4639 goto err_out;
4640 }
4641 list_add(&attribute->entry, &sub_set->attribute);
4642
4643 attribute->attribute.attr.mode =
4644 feature->ops.is_visible(adev);
4645 attribute->attribute.attr.name = feature->name;
4646 attribute->attribute.show =
4647 feature->ops.show;
4648 attribute->attribute.store =
4649 feature->ops.store;
4650 ret = sysfs_create_file(&sub_set->kobj,
4651 &attribute->attribute.attr);
4652 if (ret)
4653 goto err_out;
4654 }
4655 }
4656 }
4657
4658 /*
4659 * If gpu_od is the only member in the list, that means gpu_od is an
4660 * empty directory, so remove it.
4661 */
4662 if (list_is_singular(&adev->pm.od_kobj_list))
4663 goto err_out;
4664
4665 return 0;
4666
4667 err_out:
4668 amdgpu_od_set_fini(adev);
4669
4670 return ret;
4671 }
4672
4673 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
4674 {
4675 enum amdgpu_sriov_vf_mode mode;
4676 uint32_t mask = 0;
4677 uint32_t tmp;
4678 int ret;
4679
4680 if (adev->pm.sysfs_initialized)
4681 return 0;
4682
4683 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
4684
4685 if (adev->pm.dpm_enabled == 0)
4686 return 0;
4687
4688 mode = amdgpu_virt_get_sriov_vf_mode(adev);
4689
4690 /* under multi-vf mode, none of the hwmon attributes are supported */
4691 if (mode != SRIOV_VF_MODE_MULTI_VF) {
4692 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
4693 DRIVER_NAME, adev,
4694 hwmon_groups);
4695 if (IS_ERR(adev->pm.int_hwmon_dev)) {
4696 ret = PTR_ERR(adev->pm.int_hwmon_dev);
4697 dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
4698 return ret;
4699 }
4700 }
4701
4702 switch (mode) {
4703 case SRIOV_VF_MODE_ONE_VF:
4704 mask = ATTR_FLAG_ONEVF;
4705 break;
4706 case SRIOV_VF_MODE_MULTI_VF:
4707 mask = 0;
4708 break;
4709 case SRIOV_VF_MODE_BARE_METAL:
4710 default:
4711 mask = ATTR_FLAG_MASK_ALL;
4712 break;
4713 }
4714
4715 ret = amdgpu_device_attr_create_groups(adev,
4716 amdgpu_device_attrs,
4717 ARRAY_SIZE(amdgpu_device_attrs),
4718 mask,
4719 &adev->pm.pm_attr_list);
4720 if (ret)
4721 goto err_out0;
4722
4723 if (amdgpu_dpm_is_overdrive_supported(adev)) {
4724 ret = amdgpu_od_set_init(adev);
4725 if (ret)
4726 goto err_out1;
4727 } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
4728 dev_info(adev->dev, "overdrive feature is not supported\n");
4729 }
4730
4731 if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
4732 -EOPNOTSUPP) {
4733 ret = devm_device_add_group(adev->dev,
4734 &amdgpu_pm_policy_attr_group);
4735 if (ret)
4736 goto err_out1;
4737 }
4738
4739 if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
4740 ret = devm_device_add_group(adev->dev,
4741 &amdgpu_board_attr_group);
4742 if (ret)
4743 goto err_out1;
4744 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
4745 (void *)&tmp) != -EOPNOTSUPP) {
4746 sysfs_add_file_to_group(&adev->dev->kobj,
4747 &dev_attr_cur_node_power_limit.attr,
4748 amdgpu_board_attr_group.name);
4749 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
4750 amdgpu_board_attr_group.name);
4751 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
4752 amdgpu_board_attr_group.name);
4753 sysfs_add_file_to_group(&adev->dev->kobj,
4754 &dev_attr_max_node_power_limit.attr,
4755 amdgpu_board_attr_group.name);
4756 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
4757 amdgpu_board_attr_group.name);
4758 }
4759 }
4760
4761 adev->pm.sysfs_initialized = true;
4762
4763 return 0;
4764
4765 err_out1:
4766 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4767 err_out0:
4768 if (adev->pm.int_hwmon_dev)
4769 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4770
4771 return ret;
4772 }
4773
4774 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
4775 {
4776 amdgpu_od_set_fini(adev);
4777
4778 if (adev->pm.int_hwmon_dev)
4779 hwmon_device_unregister(adev->pm.int_hwmon_dev);
4780
4781 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
4782 }
4783
4784 /*
4785 * Debugfs info
4786 */
4787 #if defined(CONFIG_DEBUG_FS)
4788
4789 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
4790 struct amdgpu_device *adev)
4791 {
4792 uint16_t *p_val;
4793 uint32_t size;
4794 int i;
4795 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
4796
4797 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
4798 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
4799 GFP_KERNEL);
if (!p_val) /* defensive check: kcalloc() can fail */
	return;
4800
4801 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
4802 (void *)p_val, &size)) {
4803 for (i = 0; i < num_cpu_cores; i++)
4804 seq_printf(m, "\t%u MHz (CPU%d)\n",
4805 *(p_val + i), i);
4806 }
4807
4808 kfree(p_val);
4809 }
4810 }
4811
4812 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
4813 {
4814 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
4815 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
4816 uint32_t value;
4817 uint64_t value64 = 0;
4818 uint32_t query = 0;
4819 int size;
4820
4821 /* GPU Clocks */
4822 size = sizeof(value);
4823 seq_printf(m, "GFX Clocks and Power:\n");
4824
4825 amdgpu_debugfs_prints_cpu_info(m, adev);
4826
4827 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
4828 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
4829 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
4830 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
4831 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
4832 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
4833 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
4834 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
4835 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
4836 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
4837 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
4838 seq_printf(m, "\t%u mV (VDDNB)\n", value);
4839 size = sizeof(uint32_t);
4840 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
4841 if (adev->flags & AMD_IS_APU)
4842 seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
4843 else
4844 seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
4845 }
4846 size = sizeof(uint32_t);
4847 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
4848 if (adev->flags & AMD_IS_APU)
4849 seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
4850 else
4851 seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
4852 }
4853 size = sizeof(value);
4854 seq_printf(m, "\n");
4855
4856 /* GPU Temp */
4857 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
4858 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
4859
4860 /* GPU Load */
4861 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
4862 seq_printf(m, "GPU Load: %u %%\n", value);
4863 /* MEM Load */
4864 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
4865 seq_printf(m, "MEM Load: %u %%\n", value);
4866 /* VCN Load */
4867 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
4868 seq_printf(m, "VCN Load: %u %%\n", value);
4869
4870 seq_printf(m, "\n");
4871
4872 /* SMC feature mask */
4873 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
4874 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
4875
4876 /* ASICs newer than CHIP_VEGA20 support these sensors */
4877 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
4878 /* VCN clocks */
4879 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
4880 if (!value) {
4881 seq_printf(m, "VCN: Powered down\n");
4882 } else {
4883 seq_printf(m, "VCN: Powered up\n");
4884 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4885 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4886 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4887 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4888 }
4889 }
4890 seq_printf(m, "\n");
4891 } else {
4892 /* UVD clocks */
4893 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
4894 if (!value) {
4895 seq_printf(m, "UVD: Powered down\n");
4896 } else {
4897 seq_printf(m, "UVD: Powered up\n");
4898 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
4899 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
4900 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
4901 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
4902 }
4903 }
4904 seq_printf(m, "\n");
4905
4906 /* VCE clocks */
4907 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
4908 if (!value) {
4909 seq_printf(m, "VCE: Powered down\n");
4910 } else {
4911 seq_printf(m, "VCE: Powered up\n");
4912 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
4913 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
4914 }
4915 }
4916 }
4917
4918 return 0;
4919 }
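/*
 * Shape of the amdgpu_pm_info debugfs dump produced above, for orientation
 * (all values are illustrative, and lines for unsupported sensors are
 * skipped at runtime):
 *
 *   GFX Clocks and Power:
 *           1000 MHz (MCLK)
 *           500 MHz (SCLK)
 *           8.00 W (average SoC)
 *
 *   GPU Temperature: 45 C
 *   GPU Load: 0 %
 */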
4920
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

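/**
 * amdgpu_parse_cg_state - Print the state of each known clockgating feature.
 * @m: seq_file to print to.
 * @flags: AMD_CG_SUPPORT_* bitmask as reported by the IP blocks.
 *
 * Walks the clocks[] table above and prints one "On"/"Off" line per entry,
 * depending on whether the corresponding flag is set in @flags.
 */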
static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

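/**
 * amdgpu_debugfs_pm_info_show - debugfs show() callback for amdgpu_pm_info.
 * @m: seq_file to print to.
 * @unused: unused seq_file private data.
 *
 * Dumps the current performance level, sensor readings and clockgating
 * state. Holds a power-management access reference for the duration of
 * the dump so the device cannot be runtime suspended underneath us.
 */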
static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	u64 flags = 0;
	int r;

	r = amdgpu_pm_get_access(adev);
	if (r < 0)
		return r;

	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
		if (r)
			goto out;
	}

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

out:
	amdgpu_pm_put_access(adev);

	return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);

/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
 *
 * Reads the debug memory region allocated to PMFW
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	size_t smu_prv_buf_size;
	void *smu_prv_buf;
	int ret = 0;

	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
	if (ret)
		return ret;

	if (!smu_prv_buf || !smu_prv_buf_size)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
				       smu_prv_buf_size);
}

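/* Read-only debugfs interface to the PMFW private (debug) buffer. */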
static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};

#endif

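/**
 * amdgpu_debugfs_pm_init - Register the power-management debugfs files.
 * @adev: Target device.
 *
 * Creates amdgpu_pm_info (and, when the SMU reserved a private buffer,
 * amdgpu_pm_prv_buffer) in the device's primary DRM debugfs directory,
 * typically /sys/kernel/debug/dri/<minor>. Does nothing unless DPM is
 * enabled.
 */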
void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
