1 /* 2 * Copyright 2017 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Rafał Miłecki <zajec5@gmail.com> 23 * Alex Deucher <alexdeucher@gmail.com> 24 */ 25 26 #include "amdgpu.h" 27 #include "amdgpu_drv.h" 28 #include "amdgpu_pm.h" 29 #include "amdgpu_dpm.h" 30 #include "atom.h" 31 #include <linux/pci.h> 32 #include <linux/hwmon.h> 33 #include <linux/hwmon-sysfs.h> 34 #include <linux/nospec.h> 35 #include <linux/pm_runtime.h> 36 #include <asm/processor.h> 37 38 #define MAX_NUM_OF_FEATURES_PER_SUBSET 8 39 #define MAX_NUM_OF_SUBSETS 8 40 41 #define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name) 42 43 struct od_attribute { 44 struct kobj_attribute attribute; 45 struct list_head entry; 46 }; 47 48 struct od_kobj { 49 struct kobject kobj; 50 struct list_head entry; 51 struct list_head attribute; 52 void *priv; 53 }; 54 55 struct od_feature_ops { 56 umode_t (*is_visible)(struct amdgpu_device *adev); 57 ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, 58 char *buf); 59 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, 60 const char *buf, size_t count); 61 }; 62 63 struct od_feature_item { 64 const char *name; 65 struct od_feature_ops ops; 66 }; 67 68 struct od_feature_container { 69 char *name; 70 struct od_feature_ops ops; 71 struct od_feature_item sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET]; 72 }; 73 74 struct od_feature_set { 75 struct od_feature_container containers[MAX_NUM_OF_SUBSETS]; 76 }; 77 78 static const struct hwmon_temp_label { 79 enum PP_HWMON_TEMP channel; 80 const char *label; 81 } temp_label[] = { 82 {PP_TEMP_EDGE, "edge"}, 83 {PP_TEMP_JUNCTION, "junction"}, 84 {PP_TEMP_MEM, "mem"}, 85 }; 86 87 const char * const amdgpu_pp_profile_name[] = { 88 "BOOTUP_DEFAULT", 89 "3D_FULL_SCREEN", 90 "POWER_SAVING", 91 "VIDEO", 92 "VR", 93 "COMPUTE", 94 "CUSTOM", 95 "WINDOW_3D", 96 "CAPPED", 97 "UNCAPPED", 98 }; 99 100 /** 101 * amdgpu_pm_dev_state_check - Check if device can be accessed. 102 * @adev: Target device. 103 * @runpm: Check runpm status for suspend state checks. 104 * 105 * Checks the state of the @adev for access. Return 0 if the device is 106 * accessible or a negative error code otherwise. 107 */ 108 static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm) 109 { 110 bool runpm_check = runpm ? adev->in_runpm : false; 111 bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT); 112 113 if (amdgpu_in_reset(adev) || !full_init) 114 return -EBUSY; 115 116 if (adev->in_suspend && !runpm_check) 117 return -EBUSY; 118 119 return 0; 120 } 121 122 /** 123 * amdgpu_pm_get_access - Check if device can be accessed, resume if needed. 124 * @adev: Target device. 125 * 126 * Checks the state of the @adev for access. Use runtime pm API to resume if 127 * needed. Return 0 if the device is accessible or a negative error code 128 * otherwise. 129 */ 130 static int amdgpu_pm_get_access(struct amdgpu_device *adev) 131 { 132 int ret; 133 134 ret = amdgpu_pm_dev_state_check(adev, true); 135 if (ret) 136 return ret; 137 138 return pm_runtime_resume_and_get(adev->dev); 139 } 140 141 /** 142 * amdgpu_pm_get_access_if_active - Check if device is active for access. 143 * @adev: Target device. 144 * 145 * Checks the state of the @adev for access. Use runtime pm API to determine 146 * if device is active. Allow access only if device is active.Return 0 if the 147 * device is accessible or a negative error code otherwise. 148 */ 149 static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev) 150 { 151 int ret; 152 153 /* Ignore runpm status. If device is in suspended state, deny access */ 154 ret = amdgpu_pm_dev_state_check(adev, false); 155 if (ret) 156 return ret; 157 158 /* 159 * Allow only if device is active. If runpm is disabled also, as in 160 * kernels without CONFIG_PM, allow access. 161 */ 162 ret = pm_runtime_get_if_active(adev->dev); 163 if (!ret) 164 return -EPERM; 165 166 return 0; 167 } 168 169 /** 170 * amdgpu_pm_put_access - Put to auto suspend mode after a device access. 171 * @adev: Target device. 172 * 173 * Should be paired with amdgpu_pm_get_access* calls 174 */ 175 static inline void amdgpu_pm_put_access(struct amdgpu_device *adev) 176 { 177 pm_runtime_put_autosuspend(adev->dev); 178 } 179 180 /** 181 * DOC: power_dpm_state 182 * 183 * The power_dpm_state file is a legacy interface and is only provided for 184 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting 185 * certain power related parameters. The file power_dpm_state is used for this. 186 * It accepts the following arguments: 187 * 188 * - battery 189 * 190 * - balanced 191 * 192 * - performance 193 * 194 * battery 195 * 196 * On older GPUs, the vbios provided a special power state for battery 197 * operation. Selecting battery switched to this state. This is no 198 * longer provided on newer GPUs so the option does nothing in that case. 199 * 200 * balanced 201 * 202 * On older GPUs, the vbios provided a special power state for balanced 203 * operation. Selecting balanced switched to this state. This is no 204 * longer provided on newer GPUs so the option does nothing in that case. 205 * 206 * performance 207 * 208 * On older GPUs, the vbios provided a special power state for performance 209 * operation. Selecting performance switched to this state. This is no 210 * longer provided on newer GPUs so the option does nothing in that case. 211 * 212 */ 213 214 static ssize_t amdgpu_get_power_dpm_state(struct device *dev, 215 struct device_attribute *attr, 216 char *buf) 217 { 218 struct drm_device *ddev = dev_get_drvdata(dev); 219 struct amdgpu_device *adev = drm_to_adev(ddev); 220 enum amd_pm_state_type pm; 221 int ret; 222 223 ret = amdgpu_pm_get_access_if_active(adev); 224 if (ret) 225 return ret; 226 227 amdgpu_dpm_get_current_power_state(adev, &pm); 228 229 amdgpu_pm_put_access(adev); 230 231 return sysfs_emit(buf, "%s\n", 232 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 233 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 234 } 235 236 static ssize_t amdgpu_set_power_dpm_state(struct device *dev, 237 struct device_attribute *attr, 238 const char *buf, 239 size_t count) 240 { 241 struct drm_device *ddev = dev_get_drvdata(dev); 242 struct amdgpu_device *adev = drm_to_adev(ddev); 243 enum amd_pm_state_type state; 244 int ret; 245 246 if (sysfs_streq(buf, "battery")) 247 state = POWER_STATE_TYPE_BATTERY; 248 else if (sysfs_streq(buf, "balanced")) 249 state = POWER_STATE_TYPE_BALANCED; 250 else if (sysfs_streq(buf, "performance")) 251 state = POWER_STATE_TYPE_PERFORMANCE; 252 else 253 return -EINVAL; 254 255 ret = amdgpu_pm_get_access(adev); 256 if (ret < 0) 257 return ret; 258 259 amdgpu_dpm_set_power_state(adev, state); 260 261 amdgpu_pm_put_access(adev); 262 263 return count; 264 } 265 266 267 /** 268 * DOC: power_dpm_force_performance_level 269 * 270 * The amdgpu driver provides a sysfs API for adjusting certain power 271 * related parameters. The file power_dpm_force_performance_level is 272 * used for this. It accepts the following arguments: 273 * 274 * - auto 275 * 276 * - low 277 * 278 * - high 279 * 280 * - manual 281 * 282 * - profile_standard 283 * 284 * - profile_min_sclk 285 * 286 * - profile_min_mclk 287 * 288 * - profile_peak 289 * 290 * auto 291 * 292 * When auto is selected, the driver will attempt to dynamically select 293 * the optimal power profile for current conditions in the driver. 294 * 295 * low 296 * 297 * When low is selected, the clocks are forced to the lowest power state. 298 * 299 * high 300 * 301 * When high is selected, the clocks are forced to the highest power state. 302 * 303 * manual 304 * 305 * When manual is selected, the user can manually adjust which power states 306 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 307 * and pp_dpm_pcie files and adjust the power state transition heuristics 308 * via the pp_power_profile_mode sysfs file. 309 * 310 * profile_standard 311 * profile_min_sclk 312 * profile_min_mclk 313 * profile_peak 314 * 315 * When the profiling modes are selected, clock and power gating are 316 * disabled and the clocks are set for different profiling cases. This 317 * mode is recommended for profiling specific work loads where you do 318 * not want clock or power gating for clock fluctuation to interfere 319 * with your results. profile_standard sets the clocks to a fixed clock 320 * level which varies from asic to asic. profile_min_sclk forces the sclk 321 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 322 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 323 * 324 */ 325 326 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, 327 struct device_attribute *attr, 328 char *buf) 329 { 330 struct drm_device *ddev = dev_get_drvdata(dev); 331 struct amdgpu_device *adev = drm_to_adev(ddev); 332 enum amd_dpm_forced_level level = 0xff; 333 int ret; 334 335 ret = amdgpu_pm_get_access_if_active(adev); 336 if (ret) 337 return ret; 338 339 level = amdgpu_dpm_get_performance_level(adev); 340 341 amdgpu_pm_put_access(adev); 342 343 return sysfs_emit(buf, "%s\n", 344 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 345 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 346 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 347 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 348 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 349 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 350 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 351 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 352 (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" : 353 "unknown"); 354 } 355 356 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, 357 struct device_attribute *attr, 358 const char *buf, 359 size_t count) 360 { 361 struct drm_device *ddev = dev_get_drvdata(dev); 362 struct amdgpu_device *adev = drm_to_adev(ddev); 363 enum amd_dpm_forced_level level; 364 int ret = 0; 365 366 if (sysfs_streq(buf, "low")) 367 level = AMD_DPM_FORCED_LEVEL_LOW; 368 else if (sysfs_streq(buf, "high")) 369 level = AMD_DPM_FORCED_LEVEL_HIGH; 370 else if (sysfs_streq(buf, "auto")) 371 level = AMD_DPM_FORCED_LEVEL_AUTO; 372 else if (sysfs_streq(buf, "manual")) 373 level = AMD_DPM_FORCED_LEVEL_MANUAL; 374 else if (sysfs_streq(buf, "profile_exit")) 375 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 376 else if (sysfs_streq(buf, "profile_standard")) 377 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 378 else if (sysfs_streq(buf, "profile_min_sclk")) 379 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 380 else if (sysfs_streq(buf, "profile_min_mclk")) 381 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 382 else if (sysfs_streq(buf, "profile_peak")) 383 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 384 else if (sysfs_streq(buf, "perf_determinism")) 385 level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM; 386 else 387 return -EINVAL; 388 389 ret = amdgpu_pm_get_access(adev); 390 if (ret < 0) 391 return ret; 392 393 mutex_lock(&adev->pm.stable_pstate_ctx_lock); 394 if (amdgpu_dpm_force_performance_level(adev, level)) { 395 amdgpu_pm_put_access(adev); 396 mutex_unlock(&adev->pm.stable_pstate_ctx_lock); 397 return -EINVAL; 398 } 399 /* override whatever a user ctx may have set */ 400 adev->pm.stable_pstate_ctx = NULL; 401 mutex_unlock(&adev->pm.stable_pstate_ctx_lock); 402 403 amdgpu_pm_put_access(adev); 404 405 return count; 406 } 407 408 static ssize_t amdgpu_get_pp_num_states(struct device *dev, 409 struct device_attribute *attr, 410 char *buf) 411 { 412 struct drm_device *ddev = dev_get_drvdata(dev); 413 struct amdgpu_device *adev = drm_to_adev(ddev); 414 struct pp_states_info data; 415 uint32_t i; 416 int buf_len, ret; 417 418 ret = amdgpu_pm_get_access_if_active(adev); 419 if (ret) 420 return ret; 421 422 if (amdgpu_dpm_get_pp_num_states(adev, &data)) 423 memset(&data, 0, sizeof(data)); 424 425 amdgpu_pm_put_access(adev); 426 427 buf_len = sysfs_emit(buf, "states: %d\n", data.nums); 428 for (i = 0; i < data.nums; i++) 429 buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i, 430 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 431 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 432 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 433 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 434 435 return buf_len; 436 } 437 438 static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 439 struct device_attribute *attr, 440 char *buf) 441 { 442 struct drm_device *ddev = dev_get_drvdata(dev); 443 struct amdgpu_device *adev = drm_to_adev(ddev); 444 struct pp_states_info data = {0}; 445 enum amd_pm_state_type pm = 0; 446 int i = 0, ret = 0; 447 448 ret = amdgpu_pm_get_access_if_active(adev); 449 if (ret) 450 return ret; 451 452 amdgpu_dpm_get_current_power_state(adev, &pm); 453 454 ret = amdgpu_dpm_get_pp_num_states(adev, &data); 455 456 amdgpu_pm_put_access(adev); 457 458 if (ret) 459 return ret; 460 461 for (i = 0; i < data.nums; i++) { 462 if (pm == data.states[i]) 463 break; 464 } 465 466 if (i == data.nums) 467 i = -EINVAL; 468 469 return sysfs_emit(buf, "%d\n", i); 470 } 471 472 static ssize_t amdgpu_get_pp_force_state(struct device *dev, 473 struct device_attribute *attr, 474 char *buf) 475 { 476 struct drm_device *ddev = dev_get_drvdata(dev); 477 struct amdgpu_device *adev = drm_to_adev(ddev); 478 479 if (adev->pm.pp_force_state_enabled) 480 return amdgpu_get_pp_cur_state(dev, attr, buf); 481 else 482 return sysfs_emit(buf, "\n"); 483 } 484 485 static ssize_t amdgpu_set_pp_force_state(struct device *dev, 486 struct device_attribute *attr, 487 const char *buf, 488 size_t count) 489 { 490 struct drm_device *ddev = dev_get_drvdata(dev); 491 struct amdgpu_device *adev = drm_to_adev(ddev); 492 enum amd_pm_state_type state = 0; 493 struct pp_states_info data; 494 unsigned long idx; 495 int ret; 496 497 adev->pm.pp_force_state_enabled = false; 498 499 if (strlen(buf) == 1) 500 return count; 501 502 ret = kstrtoul(buf, 0, &idx); 503 if (ret || idx >= ARRAY_SIZE(data.states)) 504 return -EINVAL; 505 506 idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 507 508 ret = amdgpu_pm_get_access(adev); 509 if (ret < 0) 510 return ret; 511 512 ret = amdgpu_dpm_get_pp_num_states(adev, &data); 513 if (ret) 514 goto err_out; 515 516 state = data.states[idx]; 517 518 /* only set user selected power states */ 519 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 520 state != POWER_STATE_TYPE_DEFAULT) { 521 ret = amdgpu_dpm_dispatch_task(adev, 522 AMD_PP_TASK_ENABLE_USER_STATE, &state); 523 if (ret) 524 goto err_out; 525 526 adev->pm.pp_force_state_enabled = true; 527 } 528 529 amdgpu_pm_put_access(adev); 530 531 return count; 532 533 err_out: 534 amdgpu_pm_put_access(adev); 535 536 return ret; 537 } 538 539 /** 540 * DOC: pp_table 541 * 542 * The amdgpu driver provides a sysfs API for uploading new powerplay 543 * tables. The file pp_table is used for this. Reading the file 544 * will dump the current power play table. Writing to the file 545 * will attempt to upload a new powerplay table and re-initialize 546 * powerplay using that new table. 547 * 548 */ 549 550 static ssize_t amdgpu_get_pp_table(struct device *dev, 551 struct device_attribute *attr, 552 char *buf) 553 { 554 struct drm_device *ddev = dev_get_drvdata(dev); 555 struct amdgpu_device *adev = drm_to_adev(ddev); 556 char *table = NULL; 557 int size, ret; 558 559 ret = amdgpu_pm_get_access_if_active(adev); 560 if (ret) 561 return ret; 562 563 size = amdgpu_dpm_get_pp_table(adev, &table); 564 565 amdgpu_pm_put_access(adev); 566 567 if (size <= 0) 568 return size; 569 570 if (size >= PAGE_SIZE) 571 size = PAGE_SIZE - 1; 572 573 memcpy(buf, table, size); 574 575 return size; 576 } 577 578 static ssize_t amdgpu_set_pp_table(struct device *dev, 579 struct device_attribute *attr, 580 const char *buf, 581 size_t count) 582 { 583 struct drm_device *ddev = dev_get_drvdata(dev); 584 struct amdgpu_device *adev = drm_to_adev(ddev); 585 int ret = 0; 586 587 ret = amdgpu_pm_get_access(adev); 588 if (ret < 0) 589 return ret; 590 591 ret = amdgpu_dpm_set_pp_table(adev, buf, count); 592 593 amdgpu_pm_put_access(adev); 594 595 if (ret) 596 return ret; 597 598 return count; 599 } 600 601 /** 602 * DOC: pp_od_clk_voltage 603 * 604 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 605 * in each power level within a power state. The pp_od_clk_voltage is used for 606 * this. 607 * 608 * Note that the actual memory controller clock rate are exposed, not 609 * the effective memory clock of the DRAMs. To translate it, use the 610 * following formula: 611 * 612 * Clock conversion (Mhz): 613 * 614 * HBM: effective_memory_clock = memory_controller_clock * 1 615 * 616 * G5: effective_memory_clock = memory_controller_clock * 1 617 * 618 * G6: effective_memory_clock = memory_controller_clock * 2 619 * 620 * DRAM data rate (MT/s): 621 * 622 * HBM: effective_memory_clock * 2 = data_rate 623 * 624 * G5: effective_memory_clock * 4 = data_rate 625 * 626 * G6: effective_memory_clock * 8 = data_rate 627 * 628 * Bandwidth (MB/s): 629 * 630 * data_rate * vram_bit_width / 8 = memory_bandwidth 631 * 632 * Some examples: 633 * 634 * G5 on RX460: 635 * 636 * memory_controller_clock = 1750 Mhz 637 * 638 * effective_memory_clock = 1750 Mhz * 1 = 1750 Mhz 639 * 640 * data rate = 1750 * 4 = 7000 MT/s 641 * 642 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s 643 * 644 * G6 on RX5700: 645 * 646 * memory_controller_clock = 875 Mhz 647 * 648 * effective_memory_clock = 875 Mhz * 2 = 1750 Mhz 649 * 650 * data rate = 1750 * 8 = 14000 MT/s 651 * 652 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s 653 * 654 * < For Vega10 and previous ASICs > 655 * 656 * Reading the file will display: 657 * 658 * - a list of engine clock levels and voltages labeled OD_SCLK 659 * 660 * - a list of memory clock levels and voltages labeled OD_MCLK 661 * 662 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 663 * 664 * To manually adjust these settings, first select manual using 665 * power_dpm_force_performance_level. Enter a new value for each 666 * level by writing a string that contains "s/m level clock voltage" to 667 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 668 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 669 * 810 mV. When you have edited all of the states as needed, write 670 * "c" (commit) to the file to commit your changes. If you want to reset to the 671 * default power levels, write "r" (reset) to the file to reset them. 672 * 673 * 674 * < For Vega20 and newer ASICs > 675 * 676 * Reading the file will display: 677 * 678 * - minimum and maximum engine clock labeled OD_SCLK 679 * 680 * - minimum(not available for Vega20 and Navi1x) and maximum memory 681 * clock labeled OD_MCLK 682 * 683 * - minimum and maximum fabric clock labeled OD_FCLK (SMU13) 684 * 685 * - three <frequency, voltage> points labeled OD_VDDC_CURVE. 686 * They can be used to calibrate the sclk voltage curve. This is 687 * available for Vega20 and NV1X. 688 * 689 * - voltage offset(in mV) applied on target voltage calculation. 690 * This is available for Sienna Cichlid, Navy Flounder, Dimgrey 691 * Cavefish and some later SMU13 ASICs. For these ASICs, the target 692 * voltage calculation can be illustrated by "voltage = voltage 693 * calculated from v/f curve + overdrive vddgfx offset" 694 * 695 * - a list of valid ranges for sclk, mclk, voltage curve points 696 * or voltage offset labeled OD_RANGE 697 * 698 * < For APUs > 699 * 700 * Reading the file will display: 701 * 702 * - minimum and maximum engine clock labeled OD_SCLK 703 * 704 * - a list of valid ranges for sclk labeled OD_RANGE 705 * 706 * < For VanGogh > 707 * 708 * Reading the file will display: 709 * 710 * - minimum and maximum engine clock labeled OD_SCLK 711 * - minimum and maximum core clocks labeled OD_CCLK 712 * 713 * - a list of valid ranges for sclk and cclk labeled OD_RANGE 714 * 715 * To manually adjust these settings: 716 * 717 * - First select manual using power_dpm_force_performance_level 718 * 719 * - For clock frequency setting, enter a new value by writing a 720 * string that contains "s/m/f index clock" to the file. The index 721 * should be 0 if to set minimum clock. And 1 if to set maximum 722 * clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz. 723 * "m 1 800" will update maximum mclk to be 800Mhz. "f 1 1600" will 724 * update maximum fabric clock to be 1600Mhz. For core 725 * clocks on VanGogh, the string contains "p core index clock". 726 * E.g., "p 2 0 800" would set the minimum core clock on core 727 * 2 to 800Mhz. 728 * 729 * For sclk voltage curve supported by Vega20 and NV1X, enter the new 730 * values by writing a string that contains "vc point clock voltage" 731 * to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300 732 * 600" will update point1 with clock set as 300Mhz and voltage as 600mV. 733 * "vc 2 1000 1000" will update point3 with clock set as 1000Mhz and 734 * voltage 1000mV. 735 * 736 * For voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey 737 * Cavefish and some later SMU13 ASICs, enter the new value by writing a 738 * string that contains "vo offset". E.g., "vo -10" will update the extra 739 * voltage offset applied to the whole v/f curve line as -10mv. 740 * 741 * - When you have edited all of the states as needed, write "c" (commit) 742 * to the file to commit your changes 743 * 744 * - If you want to reset to the default power levels, write "r" (reset) 745 * to the file to reset them 746 * 747 */ 748 749 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 750 struct device_attribute *attr, 751 const char *buf, 752 size_t count) 753 { 754 struct drm_device *ddev = dev_get_drvdata(dev); 755 struct amdgpu_device *adev = drm_to_adev(ddev); 756 int ret; 757 uint32_t parameter_size = 0; 758 long parameter[64]; 759 char buf_cpy[128]; 760 char *tmp_str; 761 char *sub_str; 762 const char delimiter[3] = {' ', '\n', '\0'}; 763 uint32_t type; 764 765 if (count > 127 || count == 0) 766 return -EINVAL; 767 768 if (*buf == 's') 769 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 770 else if (*buf == 'p') 771 type = PP_OD_EDIT_CCLK_VDDC_TABLE; 772 else if (*buf == 'm') 773 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 774 else if (*buf == 'f') 775 type = PP_OD_EDIT_FCLK_TABLE; 776 else if (*buf == 'r') 777 type = PP_OD_RESTORE_DEFAULT_TABLE; 778 else if (*buf == 'c') 779 type = PP_OD_COMMIT_DPM_TABLE; 780 else if (!strncmp(buf, "vc", 2)) 781 type = PP_OD_EDIT_VDDC_CURVE; 782 else if (!strncmp(buf, "vo", 2)) 783 type = PP_OD_EDIT_VDDGFX_OFFSET; 784 else 785 return -EINVAL; 786 787 memcpy(buf_cpy, buf, count); 788 buf_cpy[count] = 0; 789 790 tmp_str = buf_cpy; 791 792 if ((type == PP_OD_EDIT_VDDC_CURVE) || 793 (type == PP_OD_EDIT_VDDGFX_OFFSET)) 794 tmp_str++; 795 while (isspace(*++tmp_str)); 796 797 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { 798 if (strlen(sub_str) == 0) 799 continue; 800 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 801 if (ret) 802 return -EINVAL; 803 parameter_size++; 804 805 if (!tmp_str) 806 break; 807 808 while (isspace(*tmp_str)) 809 tmp_str++; 810 } 811 812 ret = amdgpu_pm_get_access(adev); 813 if (ret < 0) 814 return ret; 815 816 if (amdgpu_dpm_set_fine_grain_clk_vol(adev, 817 type, 818 parameter, 819 parameter_size)) 820 goto err_out; 821 822 if (amdgpu_dpm_odn_edit_dpm_table(adev, type, 823 parameter, parameter_size)) 824 goto err_out; 825 826 if (type == PP_OD_COMMIT_DPM_TABLE) { 827 if (amdgpu_dpm_dispatch_task(adev, 828 AMD_PP_TASK_READJUST_POWER_STATE, 829 NULL)) 830 goto err_out; 831 } 832 833 amdgpu_pm_put_access(adev); 834 835 return count; 836 837 err_out: 838 amdgpu_pm_put_access(adev); 839 840 return -EINVAL; 841 } 842 843 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 844 struct device_attribute *attr, 845 char *buf) 846 { 847 struct drm_device *ddev = dev_get_drvdata(dev); 848 struct amdgpu_device *adev = drm_to_adev(ddev); 849 int size = 0; 850 int ret; 851 enum pp_clock_type od_clocks[] = { 852 OD_SCLK, 853 OD_MCLK, 854 OD_FCLK, 855 OD_VDDC_CURVE, 856 OD_RANGE, 857 OD_VDDGFX_OFFSET, 858 OD_CCLK, 859 }; 860 uint clk_index; 861 862 ret = amdgpu_pm_get_access_if_active(adev); 863 if (ret) 864 return ret; 865 866 for (clk_index = 0 ; clk_index < ARRAY_SIZE(od_clocks) ; clk_index++) { 867 amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size); 868 } 869 870 if (size == 0) 871 size = sysfs_emit(buf, "\n"); 872 873 amdgpu_pm_put_access(adev); 874 875 return size; 876 } 877 878 /** 879 * DOC: pp_features 880 * 881 * The amdgpu driver provides a sysfs API for adjusting what powerplay 882 * features to be enabled. The file pp_features is used for this. And 883 * this is only available for Vega10 and later dGPUs. 884 * 885 * Reading back the file will show you the followings: 886 * - Current ppfeature masks 887 * - List of the all supported powerplay features with their naming, 888 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). 889 * 890 * To manually enable or disable a specific feature, just set or clear 891 * the corresponding bit from original ppfeature masks and input the 892 * new ppfeature masks. 893 */ 894 static ssize_t amdgpu_set_pp_features(struct device *dev, 895 struct device_attribute *attr, 896 const char *buf, 897 size_t count) 898 { 899 struct drm_device *ddev = dev_get_drvdata(dev); 900 struct amdgpu_device *adev = drm_to_adev(ddev); 901 uint64_t featuremask; 902 int ret; 903 904 ret = kstrtou64(buf, 0, &featuremask); 905 if (ret) 906 return -EINVAL; 907 908 ret = amdgpu_pm_get_access(adev); 909 if (ret < 0) 910 return ret; 911 912 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 913 914 amdgpu_pm_put_access(adev); 915 916 if (ret) 917 return -EINVAL; 918 919 return count; 920 } 921 922 static ssize_t amdgpu_get_pp_features(struct device *dev, 923 struct device_attribute *attr, 924 char *buf) 925 { 926 struct drm_device *ddev = dev_get_drvdata(dev); 927 struct amdgpu_device *adev = drm_to_adev(ddev); 928 ssize_t size; 929 int ret; 930 931 ret = amdgpu_pm_get_access_if_active(adev); 932 if (ret) 933 return ret; 934 935 size = amdgpu_dpm_get_ppfeature_status(adev, buf); 936 if (size <= 0) 937 size = sysfs_emit(buf, "\n"); 938 939 amdgpu_pm_put_access(adev); 940 941 return size; 942 } 943 944 /** 945 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie 946 * 947 * The amdgpu driver provides a sysfs API for adjusting what power levels 948 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 949 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for 950 * this. 951 * 952 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for 953 * Vega10 and later ASICs. 954 * pp_dpm_fclk interface is only available for Vega20 and later ASICs. 955 * 956 * Reading back the files will show you the available power levels within 957 * the power state and the clock information for those levels. If deep sleep is 958 * applied to a clock, the level will be denoted by a special level 'S:' 959 * E.g., :: 960 * 961 * S: 19Mhz * 962 * 0: 615Mhz 963 * 1: 800Mhz 964 * 2: 888Mhz 965 * 3: 1000Mhz 966 * 967 * 968 * To manually adjust these states, first select manual using 969 * power_dpm_force_performance_level. 970 * Secondly, enter a new value for each level by inputing a string that 971 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 972 * E.g., 973 * 974 * .. code-block:: bash 975 * 976 * echo "4 5 6" > pp_dpm_sclk 977 * 978 * will enable sclk levels 4, 5, and 6. 979 * 980 * NOTE: change to the dcefclk max dpm level is not supported now 981 */ 982 983 static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, 984 enum pp_clock_type type, 985 char *buf) 986 { 987 struct drm_device *ddev = dev_get_drvdata(dev); 988 struct amdgpu_device *adev = drm_to_adev(ddev); 989 int size = 0; 990 int ret = 0; 991 992 ret = amdgpu_pm_get_access_if_active(adev); 993 if (ret) 994 return ret; 995 996 ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size); 997 if (ret) 998 return ret; 999 1000 if (size == 0) 1001 size = sysfs_emit(buf, "\n"); 1002 1003 amdgpu_pm_put_access(adev); 1004 1005 return size; 1006 } 1007 1008 /* 1009 * Worst case: 32 bits individually specified, in octal at 12 characters 1010 * per line (+1 for \n). 1011 */ 1012 #define AMDGPU_MASK_BUF_MAX (32 * 13) 1013 1014 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 1015 { 1016 int ret; 1017 unsigned long level; 1018 char *sub_str = NULL; 1019 char *tmp; 1020 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 1021 const char delimiter[3] = {' ', '\n', '\0'}; 1022 size_t bytes; 1023 1024 *mask = 0; 1025 1026 bytes = min(count, sizeof(buf_cpy) - 1); 1027 memcpy(buf_cpy, buf, bytes); 1028 buf_cpy[bytes] = '\0'; 1029 tmp = buf_cpy; 1030 while ((sub_str = strsep(&tmp, delimiter)) != NULL) { 1031 if (strlen(sub_str)) { 1032 ret = kstrtoul(sub_str, 0, &level); 1033 if (ret || level > 31) 1034 return -EINVAL; 1035 *mask |= 1 << level; 1036 } else 1037 break; 1038 } 1039 1040 return 0; 1041 } 1042 1043 static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, 1044 enum pp_clock_type type, 1045 const char *buf, 1046 size_t count) 1047 { 1048 struct drm_device *ddev = dev_get_drvdata(dev); 1049 struct amdgpu_device *adev = drm_to_adev(ddev); 1050 int ret; 1051 uint32_t mask = 0; 1052 1053 ret = amdgpu_read_mask(buf, count, &mask); 1054 if (ret) 1055 return ret; 1056 1057 ret = amdgpu_pm_get_access(adev); 1058 if (ret < 0) 1059 return ret; 1060 1061 ret = amdgpu_dpm_force_clock_level(adev, type, mask); 1062 1063 amdgpu_pm_put_access(adev); 1064 1065 if (ret) 1066 return -EINVAL; 1067 1068 return count; 1069 } 1070 1071 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 1072 struct device_attribute *attr, 1073 char *buf) 1074 { 1075 return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf); 1076 } 1077 1078 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 1079 struct device_attribute *attr, 1080 const char *buf, 1081 size_t count) 1082 { 1083 return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count); 1084 } 1085 1086 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 1087 struct device_attribute *attr, 1088 char *buf) 1089 { 1090 return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf); 1091 } 1092 1093 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 1094 struct device_attribute *attr, 1095 const char *buf, 1096 size_t count) 1097 { 1098 return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count); 1099 } 1100 1101 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 1102 struct device_attribute *attr, 1103 char *buf) 1104 { 1105 return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf); 1106 } 1107 1108 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 1109 struct device_attribute *attr, 1110 const char *buf, 1111 size_t count) 1112 { 1113 return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count); 1114 } 1115 1116 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1117 struct device_attribute *attr, 1118 char *buf) 1119 { 1120 return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf); 1121 } 1122 1123 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1124 struct device_attribute *attr, 1125 const char *buf, 1126 size_t count) 1127 { 1128 return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count); 1129 } 1130 1131 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev, 1132 struct device_attribute *attr, 1133 char *buf) 1134 { 1135 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf); 1136 } 1137 1138 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev, 1139 struct device_attribute *attr, 1140 const char *buf, 1141 size_t count) 1142 { 1143 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count); 1144 } 1145 1146 static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev, 1147 struct device_attribute *attr, 1148 char *buf) 1149 { 1150 return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf); 1151 } 1152 1153 static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev, 1154 struct device_attribute *attr, 1155 const char *buf, 1156 size_t count) 1157 { 1158 return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count); 1159 } 1160 1161 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev, 1162 struct device_attribute *attr, 1163 char *buf) 1164 { 1165 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf); 1166 } 1167 1168 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev, 1169 struct device_attribute *attr, 1170 const char *buf, 1171 size_t count) 1172 { 1173 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count); 1174 } 1175 1176 static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev, 1177 struct device_attribute *attr, 1178 char *buf) 1179 { 1180 return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf); 1181 } 1182 1183 static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev, 1184 struct device_attribute *attr, 1185 const char *buf, 1186 size_t count) 1187 { 1188 return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count); 1189 } 1190 1191 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1192 struct device_attribute *attr, 1193 char *buf) 1194 { 1195 return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf); 1196 } 1197 1198 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1199 struct device_attribute *attr, 1200 const char *buf, 1201 size_t count) 1202 { 1203 return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count); 1204 } 1205 1206 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1207 struct device_attribute *attr, 1208 char *buf) 1209 { 1210 return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf); 1211 } 1212 1213 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1214 struct device_attribute *attr, 1215 const char *buf, 1216 size_t count) 1217 { 1218 return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count); 1219 } 1220 1221 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1222 struct device_attribute *attr, 1223 char *buf) 1224 { 1225 struct drm_device *ddev = dev_get_drvdata(dev); 1226 struct amdgpu_device *adev = drm_to_adev(ddev); 1227 uint32_t value = 0; 1228 int ret; 1229 1230 ret = amdgpu_pm_get_access_if_active(adev); 1231 if (ret) 1232 return ret; 1233 1234 value = amdgpu_dpm_get_sclk_od(adev); 1235 1236 amdgpu_pm_put_access(adev); 1237 1238 return sysfs_emit(buf, "%d\n", value); 1239 } 1240 1241 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1242 struct device_attribute *attr, 1243 const char *buf, 1244 size_t count) 1245 { 1246 struct drm_device *ddev = dev_get_drvdata(dev); 1247 struct amdgpu_device *adev = drm_to_adev(ddev); 1248 int ret; 1249 long int value; 1250 1251 ret = kstrtol(buf, 0, &value); 1252 1253 if (ret) 1254 return -EINVAL; 1255 1256 ret = amdgpu_pm_get_access(adev); 1257 if (ret < 0) 1258 return ret; 1259 1260 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1261 1262 amdgpu_pm_put_access(adev); 1263 1264 return count; 1265 } 1266 1267 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 1268 struct device_attribute *attr, 1269 char *buf) 1270 { 1271 struct drm_device *ddev = dev_get_drvdata(dev); 1272 struct amdgpu_device *adev = drm_to_adev(ddev); 1273 uint32_t value = 0; 1274 int ret; 1275 1276 ret = amdgpu_pm_get_access_if_active(adev); 1277 if (ret) 1278 return ret; 1279 1280 value = amdgpu_dpm_get_mclk_od(adev); 1281 1282 amdgpu_pm_put_access(adev); 1283 1284 return sysfs_emit(buf, "%d\n", value); 1285 } 1286 1287 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 1288 struct device_attribute *attr, 1289 const char *buf, 1290 size_t count) 1291 { 1292 struct drm_device *ddev = dev_get_drvdata(dev); 1293 struct amdgpu_device *adev = drm_to_adev(ddev); 1294 int ret; 1295 long int value; 1296 1297 ret = kstrtol(buf, 0, &value); 1298 1299 if (ret) 1300 return -EINVAL; 1301 1302 ret = amdgpu_pm_get_access(adev); 1303 if (ret < 0) 1304 return ret; 1305 1306 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 1307 1308 amdgpu_pm_put_access(adev); 1309 1310 return count; 1311 } 1312 1313 /** 1314 * DOC: pp_power_profile_mode 1315 * 1316 * The amdgpu driver provides a sysfs API for adjusting the heuristics 1317 * related to switching between power levels in a power state. The file 1318 * pp_power_profile_mode is used for this. 1319 * 1320 * Reading this file outputs a list of all of the predefined power profiles 1321 * and the relevant heuristics settings for that profile. 1322 * 1323 * To select a profile or create a custom profile, first select manual using 1324 * power_dpm_force_performance_level. Writing the number of a predefined 1325 * profile to pp_power_profile_mode will enable those heuristics. To 1326 * create a custom set of heuristics, write a string of numbers to the file 1327 * starting with the number of the custom profile along with a setting 1328 * for each heuristic parameter. Due to differences across asic families 1329 * the heuristic parameters vary from family to family. Additionally, 1330 * you can apply the custom heuristics to different clock domains. Each 1331 * clock domain is considered a distinct operation so if you modify the 1332 * gfxclk heuristics and then the memclk heuristics, the all of the 1333 * custom heuristics will be retained until you switch to another profile. 1334 * 1335 */ 1336 1337 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 1338 struct device_attribute *attr, 1339 char *buf) 1340 { 1341 struct drm_device *ddev = dev_get_drvdata(dev); 1342 struct amdgpu_device *adev = drm_to_adev(ddev); 1343 ssize_t size; 1344 int ret; 1345 1346 ret = amdgpu_pm_get_access_if_active(adev); 1347 if (ret) 1348 return ret; 1349 1350 size = amdgpu_dpm_get_power_profile_mode(adev, buf); 1351 if (size <= 0) 1352 size = sysfs_emit(buf, "\n"); 1353 1354 amdgpu_pm_put_access(adev); 1355 1356 return size; 1357 } 1358 1359 1360 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 1361 struct device_attribute *attr, 1362 const char *buf, 1363 size_t count) 1364 { 1365 int ret; 1366 struct drm_device *ddev = dev_get_drvdata(dev); 1367 struct amdgpu_device *adev = drm_to_adev(ddev); 1368 uint32_t parameter_size = 0; 1369 long parameter[64]; 1370 char *sub_str, buf_cpy[128]; 1371 char *tmp_str; 1372 uint32_t i = 0; 1373 char tmp[2]; 1374 long int profile_mode = 0; 1375 const char delimiter[3] = {' ', '\n', '\0'}; 1376 1377 tmp[0] = *(buf); 1378 tmp[1] = '\0'; 1379 ret = kstrtol(tmp, 0, &profile_mode); 1380 if (ret) 1381 return -EINVAL; 1382 1383 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1384 if (count < 2 || count > 127) 1385 return -EINVAL; 1386 while (isspace(*++buf)) 1387 i++; 1388 memcpy(buf_cpy, buf, count-i); 1389 tmp_str = buf_cpy; 1390 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { 1391 if (strlen(sub_str) == 0) 1392 continue; 1393 ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 1394 if (ret) 1395 return -EINVAL; 1396 parameter_size++; 1397 if (!tmp_str) 1398 break; 1399 while (isspace(*tmp_str)) 1400 tmp_str++; 1401 } 1402 } 1403 parameter[parameter_size] = profile_mode; 1404 1405 ret = amdgpu_pm_get_access(adev); 1406 if (ret < 0) 1407 return ret; 1408 1409 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1410 1411 amdgpu_pm_put_access(adev); 1412 1413 if (!ret) 1414 return count; 1415 1416 return -EINVAL; 1417 } 1418 1419 static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev, 1420 enum amd_pp_sensors sensor, 1421 void *query) 1422 { 1423 int r, size = sizeof(uint32_t); 1424 1425 r = amdgpu_pm_get_access_if_active(adev); 1426 if (r) 1427 return r; 1428 1429 /* get the sensor value */ 1430 r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); 1431 1432 amdgpu_pm_put_access(adev); 1433 1434 return r; 1435 } 1436 1437 /** 1438 * DOC: gpu_busy_percent 1439 * 1440 * The amdgpu driver provides a sysfs API for reading how busy the GPU 1441 * is as a percentage. The file gpu_busy_percent is used for this. 1442 * The SMU firmware computes a percentage of load based on the 1443 * aggregate activity level in the IP cores. 1444 */ 1445 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, 1446 struct device_attribute *attr, 1447 char *buf) 1448 { 1449 struct drm_device *ddev = dev_get_drvdata(dev); 1450 struct amdgpu_device *adev = drm_to_adev(ddev); 1451 unsigned int value; 1452 int r; 1453 1454 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); 1455 if (r) 1456 return r; 1457 1458 return sysfs_emit(buf, "%d\n", value); 1459 } 1460 1461 /** 1462 * DOC: mem_busy_percent 1463 * 1464 * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1465 * is as a percentage. The file mem_busy_percent is used for this. 1466 * The SMU firmware computes a percentage of load based on the 1467 * aggregate activity level in the IP cores. 1468 */ 1469 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, 1470 struct device_attribute *attr, 1471 char *buf) 1472 { 1473 struct drm_device *ddev = dev_get_drvdata(dev); 1474 struct amdgpu_device *adev = drm_to_adev(ddev); 1475 unsigned int value; 1476 int r; 1477 1478 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); 1479 if (r) 1480 return r; 1481 1482 return sysfs_emit(buf, "%d\n", value); 1483 } 1484 1485 /** 1486 * DOC: vcn_busy_percent 1487 * 1488 * The amdgpu driver provides a sysfs API for reading how busy the VCN 1489 * is as a percentage. The file vcn_busy_percent is used for this. 1490 * The SMU firmware computes a percentage of load based on the 1491 * aggregate activity level in the IP cores. 1492 */ 1493 static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev, 1494 struct device_attribute *attr, 1495 char *buf) 1496 { 1497 struct drm_device *ddev = dev_get_drvdata(dev); 1498 struct amdgpu_device *adev = drm_to_adev(ddev); 1499 unsigned int value; 1500 int r; 1501 1502 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); 1503 if (r) 1504 return r; 1505 1506 return sysfs_emit(buf, "%d\n", value); 1507 } 1508 1509 /** 1510 * DOC: pcie_bw 1511 * 1512 * The amdgpu driver provides a sysfs API for estimating how much data 1513 * has been received and sent by the GPU in the last second through PCIe. 1514 * The file pcie_bw is used for this. 1515 * The Perf counters count the number of received and sent messages and return 1516 * those values, as well as the maximum payload size of a PCIe packet (mps). 1517 * Note that it is not possible to easily and quickly obtain the size of each 1518 * packet transmitted, so we output the max payload size (mps) to allow for 1519 * quick estimation of the PCIe bandwidth usage 1520 */ 1521 static ssize_t amdgpu_get_pcie_bw(struct device *dev, 1522 struct device_attribute *attr, 1523 char *buf) 1524 { 1525 struct drm_device *ddev = dev_get_drvdata(dev); 1526 struct amdgpu_device *adev = drm_to_adev(ddev); 1527 uint64_t count0 = 0, count1 = 0; 1528 int ret; 1529 1530 if (adev->flags & AMD_IS_APU) 1531 return -ENODATA; 1532 1533 if (!adev->asic_funcs->get_pcie_usage) 1534 return -ENODATA; 1535 1536 ret = amdgpu_pm_get_access_if_active(adev); 1537 if (ret) 1538 return ret; 1539 1540 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1541 1542 amdgpu_pm_put_access(adev); 1543 1544 return sysfs_emit(buf, "%llu %llu %i\n", 1545 count0, count1, pcie_get_mps(adev->pdev)); 1546 } 1547 1548 /** 1549 * DOC: unique_id 1550 * 1551 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU 1552 * The file unique_id is used for this. 1553 * This will provide a Unique ID that will persist from machine to machine 1554 * 1555 * NOTE: This will only work for GFX9 and newer. This file will be absent 1556 * on unsupported ASICs (GFX8 and older) 1557 */ 1558 static ssize_t amdgpu_get_unique_id(struct device *dev, 1559 struct device_attribute *attr, 1560 char *buf) 1561 { 1562 struct drm_device *ddev = dev_get_drvdata(dev); 1563 struct amdgpu_device *adev = drm_to_adev(ddev); 1564 1565 if (adev->unique_id) 1566 return sysfs_emit(buf, "%016llx\n", adev->unique_id); 1567 1568 return 0; 1569 } 1570 1571 /** 1572 * DOC: thermal_throttling_logging 1573 * 1574 * Thermal throttling pulls down the clock frequency and thus the performance. 1575 * It's an useful mechanism to protect the chip from overheating. Since it 1576 * impacts performance, the user controls whether it is enabled and if so, 1577 * the log frequency. 1578 * 1579 * Reading back the file shows you the status(enabled or disabled) and 1580 * the interval(in seconds) between each thermal logging. 1581 * 1582 * Writing an integer to the file, sets a new logging interval, in seconds. 1583 * The value should be between 1 and 3600. If the value is less than 1, 1584 * thermal logging is disabled. Values greater than 3600 are ignored. 1585 */ 1586 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev, 1587 struct device_attribute *attr, 1588 char *buf) 1589 { 1590 struct drm_device *ddev = dev_get_drvdata(dev); 1591 struct amdgpu_device *adev = drm_to_adev(ddev); 1592 1593 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n", 1594 adev_to_drm(adev)->unique, 1595 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled", 1596 adev->throttling_logging_rs.interval / HZ + 1); 1597 } 1598 1599 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev, 1600 struct device_attribute *attr, 1601 const char *buf, 1602 size_t count) 1603 { 1604 struct drm_device *ddev = dev_get_drvdata(dev); 1605 struct amdgpu_device *adev = drm_to_adev(ddev); 1606 long throttling_logging_interval; 1607 int ret = 0; 1608 1609 ret = kstrtol(buf, 0, &throttling_logging_interval); 1610 if (ret) 1611 return ret; 1612 1613 if (throttling_logging_interval > 3600) 1614 return -EINVAL; 1615 1616 if (throttling_logging_interval > 0) { 1617 /* 1618 * Reset the ratelimit timer internals. 1619 * This can effectively restart the timer. 1620 */ 1621 ratelimit_state_reset_interval(&adev->throttling_logging_rs, 1622 (throttling_logging_interval - 1) * HZ); 1623 atomic_set(&adev->throttling_logging_enabled, 1); 1624 } else { 1625 atomic_set(&adev->throttling_logging_enabled, 0); 1626 } 1627 1628 return count; 1629 } 1630 1631 /** 1632 * DOC: apu_thermal_cap 1633 * 1634 * The amdgpu driver provides a sysfs API for retrieving/updating thermal 1635 * limit temperature in millidegrees Celsius 1636 * 1637 * Reading back the file shows you core limit value 1638 * 1639 * Writing an integer to the file, sets a new thermal limit. The value 1640 * should be between 0 and 100. If the value is less than 0 or greater 1641 * than 100, then the write request will be ignored. 1642 */ 1643 static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev, 1644 struct device_attribute *attr, 1645 char *buf) 1646 { 1647 int ret, size; 1648 u32 limit; 1649 struct drm_device *ddev = dev_get_drvdata(dev); 1650 struct amdgpu_device *adev = drm_to_adev(ddev); 1651 1652 ret = amdgpu_pm_get_access_if_active(adev); 1653 if (ret) 1654 return ret; 1655 1656 ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit); 1657 if (!ret) 1658 size = sysfs_emit(buf, "%u\n", limit); 1659 else 1660 size = sysfs_emit(buf, "failed to get thermal limit\n"); 1661 1662 amdgpu_pm_put_access(adev); 1663 1664 return size; 1665 } 1666 1667 static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev, 1668 struct device_attribute *attr, 1669 const char *buf, 1670 size_t count) 1671 { 1672 int ret; 1673 u32 value; 1674 struct drm_device *ddev = dev_get_drvdata(dev); 1675 struct amdgpu_device *adev = drm_to_adev(ddev); 1676 1677 ret = kstrtou32(buf, 10, &value); 1678 if (ret) 1679 return ret; 1680 1681 if (value > 100) { 1682 dev_err(dev, "Invalid argument !\n"); 1683 return -EINVAL; 1684 } 1685 1686 ret = amdgpu_pm_get_access(adev); 1687 if (ret < 0) 1688 return ret; 1689 1690 ret = amdgpu_dpm_set_apu_thermal_limit(adev, value); 1691 if (ret) { 1692 amdgpu_pm_put_access(adev); 1693 dev_err(dev, "failed to update thermal limit\n"); 1694 return ret; 1695 } 1696 1697 amdgpu_pm_put_access(adev); 1698 1699 return count; 1700 } 1701 1702 static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev, 1703 struct amdgpu_device_attr *attr, 1704 uint32_t mask, 1705 enum amdgpu_device_attr_states *states) 1706 { 1707 if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP) 1708 *states = ATTR_STATE_UNSUPPORTED; 1709 1710 return 0; 1711 } 1712 1713 static ssize_t amdgpu_get_pm_metrics(struct device *dev, 1714 struct device_attribute *attr, char *buf) 1715 { 1716 struct drm_device *ddev = dev_get_drvdata(dev); 1717 struct amdgpu_device *adev = drm_to_adev(ddev); 1718 ssize_t size = 0; 1719 int ret; 1720 1721 ret = amdgpu_pm_get_access_if_active(adev); 1722 if (ret) 1723 return ret; 1724 1725 size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE); 1726 1727 amdgpu_pm_put_access(adev); 1728 1729 return size; 1730 } 1731 1732 /** 1733 * DOC: gpu_metrics 1734 * 1735 * The amdgpu driver provides a sysfs API for retrieving current gpu 1736 * metrics data. The file gpu_metrics is used for this. Reading the 1737 * file will dump all the current gpu metrics data. 1738 * 1739 * These data include temperature, frequency, engines utilization, 1740 * power consume, throttler status, fan speed and cpu core statistics( 1741 * available for APU only). That's it will give a snapshot of all sensors 1742 * at the same time. 1743 */ 1744 static ssize_t amdgpu_get_gpu_metrics(struct device *dev, 1745 struct device_attribute *attr, 1746 char *buf) 1747 { 1748 struct drm_device *ddev = dev_get_drvdata(dev); 1749 struct amdgpu_device *adev = drm_to_adev(ddev); 1750 void *gpu_metrics; 1751 ssize_t size = 0; 1752 int ret; 1753 1754 ret = amdgpu_pm_get_access_if_active(adev); 1755 if (ret) 1756 return ret; 1757 1758 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); 1759 if (size <= 0) 1760 goto out; 1761 1762 if (size >= PAGE_SIZE) 1763 size = PAGE_SIZE - 1; 1764 1765 memcpy(buf, gpu_metrics, size); 1766 1767 out: 1768 amdgpu_pm_put_access(adev); 1769 1770 return size; 1771 } 1772 1773 static int amdgpu_show_powershift_percent(struct device *dev, 1774 char *buf, enum amd_pp_sensors sensor) 1775 { 1776 struct drm_device *ddev = dev_get_drvdata(dev); 1777 struct amdgpu_device *adev = drm_to_adev(ddev); 1778 uint32_t ss_power; 1779 int r = 0, i; 1780 1781 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); 1782 if (r == -EOPNOTSUPP) { 1783 /* sensor not available on dGPU, try to read from APU */ 1784 adev = NULL; 1785 mutex_lock(&mgpu_info.mutex); 1786 for (i = 0; i < mgpu_info.num_gpu; i++) { 1787 if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) { 1788 adev = mgpu_info.gpu_ins[i].adev; 1789 break; 1790 } 1791 } 1792 mutex_unlock(&mgpu_info.mutex); 1793 if (adev) 1794 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); 1795 } 1796 1797 if (r) 1798 return r; 1799 1800 return sysfs_emit(buf, "%u%%\n", ss_power); 1801 } 1802 1803 /** 1804 * DOC: smartshift_apu_power 1805 * 1806 * The amdgpu driver provides a sysfs API for reporting APU power 1807 * shift in percentage if platform supports smartshift. Value 0 means that 1808 * there is no powershift and values between [1-100] means that the power 1809 * is shifted to APU, the percentage of boost is with respect to APU power 1810 * limit on the platform. 1811 */ 1812 1813 static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr, 1814 char *buf) 1815 { 1816 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE); 1817 } 1818 1819 /** 1820 * DOC: smartshift_dgpu_power 1821 * 1822 * The amdgpu driver provides a sysfs API for reporting dGPU power 1823 * shift in percentage if platform supports smartshift. Value 0 means that 1824 * there is no powershift and values between [1-100] means that the power is 1825 * shifted to dGPU, the percentage of boost is with respect to dGPU power 1826 * limit on the platform. 1827 */ 1828 1829 static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr, 1830 char *buf) 1831 { 1832 return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE); 1833 } 1834 1835 /** 1836 * DOC: smartshift_bias 1837 * 1838 * The amdgpu driver provides a sysfs API for reporting the 1839 * smartshift(SS2.0) bias level. The value ranges from -100 to 100 1840 * and the default is 0. -100 sets maximum preference to APU 1841 * and 100 sets max perference to dGPU. 1842 */ 1843 1844 static ssize_t amdgpu_get_smartshift_bias(struct device *dev, 1845 struct device_attribute *attr, 1846 char *buf) 1847 { 1848 int r = 0; 1849 1850 r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias); 1851 1852 return r; 1853 } 1854 1855 static ssize_t amdgpu_set_smartshift_bias(struct device *dev, 1856 struct device_attribute *attr, 1857 const char *buf, size_t count) 1858 { 1859 struct drm_device *ddev = dev_get_drvdata(dev); 1860 struct amdgpu_device *adev = drm_to_adev(ddev); 1861 int r = 0; 1862 int bias = 0; 1863 1864 r = kstrtoint(buf, 10, &bias); 1865 if (r) 1866 goto out; 1867 1868 r = amdgpu_pm_get_access(adev); 1869 if (r < 0) 1870 return r; 1871 1872 if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS) 1873 bias = AMDGPU_SMARTSHIFT_MAX_BIAS; 1874 else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS) 1875 bias = AMDGPU_SMARTSHIFT_MIN_BIAS; 1876 1877 amdgpu_smartshift_bias = bias; 1878 r = count; 1879 1880 /* TODO: update bias level with SMU message */ 1881 1882 out: 1883 amdgpu_pm_put_access(adev); 1884 1885 return r; 1886 } 1887 1888 static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 1889 uint32_t mask, enum amdgpu_device_attr_states *states) 1890 { 1891 if (!amdgpu_device_supports_smart_shift(adev)) 1892 *states = ATTR_STATE_UNSUPPORTED; 1893 1894 return 0; 1895 } 1896 1897 static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 1898 uint32_t mask, enum amdgpu_device_attr_states *states) 1899 { 1900 uint32_t ss_power; 1901 1902 if (!amdgpu_device_supports_smart_shift(adev)) 1903 *states = ATTR_STATE_UNSUPPORTED; 1904 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, 1905 (void *)&ss_power)) 1906 *states = ATTR_STATE_UNSUPPORTED; 1907 else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, 1908 (void *)&ss_power)) 1909 *states = ATTR_STATE_UNSUPPORTED; 1910 1911 return 0; 1912 } 1913 1914 static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 1915 uint32_t mask, enum amdgpu_device_attr_states *states) 1916 { 1917 *states = ATTR_STATE_SUPPORTED; 1918 1919 if (!amdgpu_dpm_is_overdrive_supported(adev)) { 1920 *states = ATTR_STATE_UNSUPPORTED; 1921 return 0; 1922 } 1923 1924 /* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0, 12.1.0 SRIOV/BM support */ 1925 if (amdgpu_is_multi_aid(adev)) { 1926 if (amdgpu_sriov_multi_vf_mode(adev)) 1927 *states = ATTR_STATE_UNSUPPORTED; 1928 return 0; 1929 } 1930 1931 if (!(attr->flags & mask)) 1932 *states = ATTR_STATE_UNSUPPORTED; 1933 1934 return 0; 1935 } 1936 1937 static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 1938 uint32_t mask, enum amdgpu_device_attr_states *states) 1939 { 1940 struct device_attribute *dev_attr = &attr->dev_attr; 1941 uint32_t gc_ver; 1942 1943 *states = ATTR_STATE_SUPPORTED; 1944 1945 if (!(attr->flags & mask)) { 1946 *states = ATTR_STATE_UNSUPPORTED; 1947 return 0; 1948 } 1949 1950 gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 1951 /* dcefclk node is not available on gfx 11.0.3 sriov */ 1952 if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) || 1953 gc_ver < IP_VERSION(9, 0, 0) || 1954 !amdgpu_device_has_display_hardware(adev)) 1955 *states = ATTR_STATE_UNSUPPORTED; 1956 1957 /* SMU MP1 does not support dcefclk level setting, 1958 * setting should not be allowed from VF if not in one VF mode. 1959 */ 1960 if (gc_ver >= IP_VERSION(10, 0, 0) || 1961 (amdgpu_sriov_multi_vf_mode(adev))) { 1962 dev_attr->attr.mode &= ~S_IWUGO; 1963 dev_attr->store = NULL; 1964 } 1965 1966 return 0; 1967 } 1968 1969 static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 1970 uint32_t mask, enum amdgpu_device_attr_states *states) 1971 { 1972 struct device_attribute *dev_attr = &attr->dev_attr; 1973 enum amdgpu_device_attr_id attr_id = attr->attr_id; 1974 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); 1975 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 1976 1977 *states = ATTR_STATE_SUPPORTED; 1978 1979 if (!(attr->flags & mask)) { 1980 *states = ATTR_STATE_UNSUPPORTED; 1981 return 0; 1982 } 1983 1984 if (DEVICE_ATTR_IS(pp_dpm_socclk)) { 1985 if (gc_ver < IP_VERSION(9, 0, 0)) 1986 *states = ATTR_STATE_UNSUPPORTED; 1987 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { 1988 if (mp1_ver < IP_VERSION(10, 0, 0)) 1989 *states = ATTR_STATE_UNSUPPORTED; 1990 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { 1991 if (!(gc_ver == IP_VERSION(10, 3, 1) || 1992 gc_ver == IP_VERSION(10, 3, 3) || 1993 gc_ver == IP_VERSION(10, 3, 6) || 1994 gc_ver == IP_VERSION(10, 3, 7) || 1995 gc_ver == IP_VERSION(10, 3, 0) || 1996 gc_ver == IP_VERSION(10, 1, 2) || 1997 gc_ver == IP_VERSION(11, 0, 0) || 1998 gc_ver == IP_VERSION(11, 0, 1) || 1999 gc_ver == IP_VERSION(11, 0, 4) || 2000 gc_ver == IP_VERSION(11, 5, 0) || 2001 gc_ver == IP_VERSION(11, 0, 2) || 2002 gc_ver == IP_VERSION(11, 0, 3) || 2003 amdgpu_is_multi_aid(adev))) 2004 *states = ATTR_STATE_UNSUPPORTED; 2005 } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { 2006 if (!((gc_ver == IP_VERSION(10, 3, 1) || 2007 gc_ver == IP_VERSION(10, 3, 0) || 2008 gc_ver == IP_VERSION(11, 0, 2) || 2009 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) 2010 *states = ATTR_STATE_UNSUPPORTED; 2011 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { 2012 if (!(gc_ver == IP_VERSION(10, 3, 1) || 2013 gc_ver == IP_VERSION(10, 3, 3) || 2014 gc_ver == IP_VERSION(10, 3, 6) || 2015 gc_ver == IP_VERSION(10, 3, 7) || 2016 gc_ver == IP_VERSION(10, 3, 0) || 2017 gc_ver == IP_VERSION(10, 1, 2) || 2018 gc_ver == IP_VERSION(11, 0, 0) || 2019 gc_ver == IP_VERSION(11, 0, 1) || 2020 gc_ver == IP_VERSION(11, 0, 4) || 2021 gc_ver == IP_VERSION(11, 5, 0) || 2022 gc_ver == IP_VERSION(11, 0, 2) || 2023 gc_ver == IP_VERSION(11, 0, 3) || 2024 amdgpu_is_multi_aid(adev))) 2025 *states = ATTR_STATE_UNSUPPORTED; 2026 } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { 2027 if (!((gc_ver == IP_VERSION(10, 3, 1) || 2028 gc_ver == IP_VERSION(10, 3, 0) || 2029 gc_ver == IP_VERSION(11, 0, 2) || 2030 gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) 2031 *states = ATTR_STATE_UNSUPPORTED; 2032 } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { 2033 if (gc_ver == IP_VERSION(9, 4, 2) || 2034 amdgpu_is_multi_aid(adev)) 2035 *states = ATTR_STATE_UNSUPPORTED; 2036 } 2037 2038 switch (gc_ver) { 2039 case IP_VERSION(9, 4, 1): 2040 case IP_VERSION(9, 4, 2): 2041 /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ 2042 if (DEVICE_ATTR_IS(pp_dpm_mclk) || 2043 DEVICE_ATTR_IS(pp_dpm_socclk) || 2044 DEVICE_ATTR_IS(pp_dpm_fclk)) { 2045 dev_attr->attr.mode &= ~S_IWUGO; 2046 dev_attr->store = NULL; 2047 } 2048 break; 2049 default: 2050 break; 2051 } 2052 2053 /* setting should not be allowed from VF if not in one VF mode */ 2054 if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) { 2055 dev_attr->attr.mode &= ~S_IWUGO; 2056 dev_attr->store = NULL; 2057 } 2058 2059 return 0; 2060 } 2061 2062 /** 2063 * DOC: board 2064 * 2065 * Certain SOCs can support various board attributes reporting. This is useful 2066 * for user application to monitor various board reated attributes. 2067 * 2068 * The amdgpu driver provides a sysfs API for reporting board attributes. Presently, 2069 * nine types of attributes are reported. Baseboard temperature and 2070 * gpu board temperature are reported as binary files. Npm status, current node power limit, 2071 * max node power limit, node power, global ppt residency, baseboard_power, baseboard_power_limit 2072 * is reported as ASCII text file. 2073 * 2074 * * .. code-block:: console 2075 * 2076 * hexdump /sys/bus/pci/devices/.../board/baseboard_temp 2077 * 2078 * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp 2079 * 2080 * hexdump /sys/bus/pci/devices/.../board/npm_status 2081 * 2082 * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit 2083 * 2084 * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit 2085 * 2086 * hexdump /sys/bus/pci/devices/.../board/node_power 2087 * 2088 * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid 2089 * 2090 * hexdump /sys/bus/pci/devices/.../board/baseboard_power 2091 * 2092 * hexdump /sys/bus/pci/devices/.../board/baseboard_power_limit 2093 */ 2094 2095 /** 2096 * DOC: baseboard_temp 2097 * 2098 * The amdgpu driver provides a sysfs API for retrieving current baseboard 2099 * temperature metrics data. The file baseboard_temp is used for this. 2100 * Reading the file will dump all the current baseboard temperature metrics data. 2101 */ 2102 static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev, 2103 struct device_attribute *attr, char *buf) 2104 { 2105 struct drm_device *ddev = dev_get_drvdata(dev); 2106 struct amdgpu_device *adev = drm_to_adev(ddev); 2107 ssize_t size; 2108 int ret; 2109 2110 ret = amdgpu_pm_get_access_if_active(adev); 2111 if (ret) 2112 return ret; 2113 2114 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL); 2115 if (size <= 0) 2116 goto out; 2117 if (size >= PAGE_SIZE) { 2118 ret = -ENOSPC; 2119 goto out; 2120 } 2121 2122 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf); 2123 2124 out: 2125 amdgpu_pm_put_access(adev); 2126 2127 if (ret) 2128 return ret; 2129 2130 return size; 2131 } 2132 2133 /** 2134 * DOC: gpuboard_temp 2135 * 2136 * The amdgpu driver provides a sysfs API for retrieving current gpuboard 2137 * temperature metrics data. The file gpuboard_temp is used for this. 2138 * Reading the file will dump all the current gpuboard temperature metrics data. 2139 */ 2140 static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev, 2141 struct device_attribute *attr, char *buf) 2142 { 2143 struct drm_device *ddev = dev_get_drvdata(dev); 2144 struct amdgpu_device *adev = drm_to_adev(ddev); 2145 ssize_t size; 2146 int ret; 2147 2148 ret = amdgpu_pm_get_access_if_active(adev); 2149 if (ret) 2150 return ret; 2151 2152 size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL); 2153 if (size <= 0) 2154 goto out; 2155 if (size >= PAGE_SIZE) { 2156 ret = -ENOSPC; 2157 goto out; 2158 } 2159 2160 amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf); 2161 2162 out: 2163 amdgpu_pm_put_access(adev); 2164 2165 if (ret) 2166 return ret; 2167 2168 return size; 2169 } 2170 2171 /** 2172 * DOC: cur_node_power_limit 2173 * 2174 * The amdgpu driver provides a sysfs API for retrieving current node power limit. 2175 * The file cur_node_power_limit is used for this. 2176 */ 2177 static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev, 2178 struct device_attribute *attr, char *buf) 2179 { 2180 struct drm_device *ddev = dev_get_drvdata(dev); 2181 struct amdgpu_device *adev = drm_to_adev(ddev); 2182 u32 nplimit; 2183 int r; 2184 2185 /* get the current node power limit */ 2186 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT, 2187 (void *)&nplimit); 2188 if (r) 2189 return r; 2190 2191 return sysfs_emit(buf, "%u\n", nplimit); 2192 } 2193 2194 /** 2195 * DOC: node_power 2196 * 2197 * The amdgpu driver provides a sysfs API for retrieving current node power. 2198 * The file node_power is used for this. 2199 */ 2200 static ssize_t amdgpu_show_node_power(struct device *dev, 2201 struct device_attribute *attr, char *buf) 2202 { 2203 struct drm_device *ddev = dev_get_drvdata(dev); 2204 struct amdgpu_device *adev = drm_to_adev(ddev); 2205 u32 npower; 2206 int r; 2207 2208 /* get the node power */ 2209 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, 2210 (void *)&npower); 2211 if (r) 2212 return r; 2213 2214 return sysfs_emit(buf, "%u\n", npower); 2215 } 2216 2217 /** 2218 * DOC: npm_status 2219 * 2220 * The amdgpu driver provides a sysfs API for retrieving current node power management status. 2221 * The file npm_status is used for this. It shows the status as enabled or disabled based on 2222 * current node power value. If node power is zero, status is disabled else enabled. 2223 */ 2224 static ssize_t amdgpu_show_npm_status(struct device *dev, 2225 struct device_attribute *attr, char *buf) 2226 { 2227 struct drm_device *ddev = dev_get_drvdata(dev); 2228 struct amdgpu_device *adev = drm_to_adev(ddev); 2229 u32 npower; 2230 int r; 2231 2232 /* get the node power */ 2233 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, 2234 (void *)&npower); 2235 if (r) 2236 return r; 2237 2238 return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled"); 2239 } 2240 2241 /** 2242 * DOC: global_ppt_resid 2243 * 2244 * The amdgpu driver provides a sysfs API for retrieving global ppt residency. 2245 * The file global_ppt_resid is used for this. 2246 */ 2247 static ssize_t amdgpu_show_global_ppt_resid(struct device *dev, 2248 struct device_attribute *attr, char *buf) 2249 { 2250 struct drm_device *ddev = dev_get_drvdata(dev); 2251 struct amdgpu_device *adev = drm_to_adev(ddev); 2252 u32 gpptresid; 2253 int r; 2254 2255 /* get the global ppt residency */ 2256 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY, 2257 (void *)&gpptresid); 2258 if (r) 2259 return r; 2260 2261 return sysfs_emit(buf, "%u\n", gpptresid); 2262 } 2263 2264 /** 2265 * DOC: max_node_power_limit 2266 * 2267 * The amdgpu driver provides a sysfs API for retrieving maximum node power limit. 2268 * The file max_node_power_limit is used for this. 2269 */ 2270 static ssize_t amdgpu_show_max_node_power_limit(struct device *dev, 2271 struct device_attribute *attr, char *buf) 2272 { 2273 struct drm_device *ddev = dev_get_drvdata(dev); 2274 struct amdgpu_device *adev = drm_to_adev(ddev); 2275 u32 max_nplimit; 2276 int r; 2277 2278 /* get the max node power limit */ 2279 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, 2280 (void *)&max_nplimit); 2281 if (r) 2282 return r; 2283 2284 return sysfs_emit(buf, "%u\n", max_nplimit); 2285 } 2286 2287 /** 2288 * DOC: baseboard_power 2289 * 2290 * The amdgpu driver provides a sysfs API for retrieving current ubb power in watts. 2291 * The file baseboard_power is used for this. 2292 */ 2293 static ssize_t amdgpu_show_baseboard_power(struct device *dev, 2294 struct device_attribute *attr, char *buf) 2295 { 2296 struct drm_device *ddev = dev_get_drvdata(dev); 2297 struct amdgpu_device *adev = drm_to_adev(ddev); 2298 u32 ubbpower; 2299 int r; 2300 2301 /* get the ubb power */ 2302 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER, 2303 (void *)&ubbpower); 2304 if (r) 2305 return r; 2306 2307 return sysfs_emit(buf, "%u\n", ubbpower); 2308 } 2309 2310 /** 2311 * DOC: baseboard_power_limit 2312 * 2313 * The amdgpu driver provides a sysfs API for retrieving threshold ubb power in watts. 2314 * The file baseboard_power_limit is used for this. 2315 */ 2316 static ssize_t amdgpu_show_baseboard_power_limit(struct device *dev, 2317 struct device_attribute *attr, char *buf) 2318 { 2319 struct drm_device *ddev = dev_get_drvdata(dev); 2320 struct amdgpu_device *adev = drm_to_adev(ddev); 2321 u32 ubbpowerlimit; 2322 int r; 2323 2324 /* get the ubb power limit */ 2325 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT, 2326 (void *)&ubbpowerlimit); 2327 if (r) 2328 return r; 2329 2330 return sysfs_emit(buf, "%u\n", ubbpowerlimit); 2331 } 2332 2333 static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL); 2334 static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL); 2335 static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL); 2336 static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL); 2337 static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL); 2338 static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL); 2339 static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL); 2340 static DEVICE_ATTR(baseboard_power, 0444, amdgpu_show_baseboard_power, NULL); 2341 static DEVICE_ATTR(baseboard_power_limit, 0444, amdgpu_show_baseboard_power_limit, NULL); 2342 2343 static struct attribute *board_attrs[] = { 2344 &dev_attr_baseboard_temp.attr, 2345 &dev_attr_gpuboard_temp.attr, 2346 NULL 2347 }; 2348 2349 static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n) 2350 { 2351 struct device *dev = kobj_to_dev(kobj); 2352 struct drm_device *ddev = dev_get_drvdata(dev); 2353 struct amdgpu_device *adev = drm_to_adev(ddev); 2354 2355 if (attr == &dev_attr_baseboard_temp.attr) { 2356 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD)) 2357 return 0; 2358 } 2359 2360 if (attr == &dev_attr_gpuboard_temp.attr) { 2361 if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) 2362 return 0; 2363 } 2364 2365 return attr->mode; 2366 } 2367 2368 const struct attribute_group amdgpu_board_attr_group = { 2369 .name = "board", 2370 .attrs = board_attrs, 2371 .is_visible = amdgpu_board_attr_visible, 2372 }; 2373 2374 /* pm policy attributes */ 2375 struct amdgpu_pm_policy_attr { 2376 struct device_attribute dev_attr; 2377 enum pp_pm_policy id; 2378 }; 2379 2380 /** 2381 * DOC: pm_policy 2382 * 2383 * Certain SOCs can support different power policies to optimize application 2384 * performance. However, this policy is provided only at SOC level and not at a 2385 * per-process level. This is useful especially when entire SOC is utilized for 2386 * dedicated workload. 2387 * 2388 * The amdgpu driver provides a sysfs API for selecting the policy. Presently, 2389 * only two types of policies are supported through this interface. 2390 * 2391 * Pstate Policy Selection - This is to select different Pstate profiles which 2392 * decides clock/throttling preferences. 2393 * 2394 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI, 2395 * this helps to select policy to be applied for per link power down. 2396 * 2397 * The list of available policies and policy levels vary between SOCs. They can 2398 * be viewed under pm_policy node directory. If SOC doesn't support any policy, 2399 * this node won't be available. The different policies supported will be 2400 * available as separate nodes under pm_policy. 2401 * 2402 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type> 2403 * 2404 * Reading the policy file shows the different levels supported. The level which 2405 * is applied presently is denoted by * (asterisk). E.g., 2406 * 2407 * .. code-block:: console 2408 * 2409 * cat /sys/bus/pci/devices/.../pm_policy/soc_pstate 2410 * 0 : soc_pstate_default 2411 * 1 : soc_pstate_0 2412 * 2 : soc_pstate_1* 2413 * 3 : soc_pstate_2 2414 * 2415 * cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd 2416 * 0 : plpd_disallow 2417 * 1 : plpd_default 2418 * 2 : plpd_optimized* 2419 * 2420 * To apply a specific policy 2421 * 2422 * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>" 2423 * 2424 * For the levels listed in the example above, to select "plpd_optimized" for 2425 * XGMI and "soc_pstate_2" for soc pstate policy - 2426 * 2427 * .. code-block:: console 2428 * 2429 * echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd 2430 * echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate 2431 * 2432 */ 2433 static ssize_t amdgpu_get_pm_policy_attr(struct device *dev, 2434 struct device_attribute *attr, 2435 char *buf) 2436 { 2437 struct drm_device *ddev = dev_get_drvdata(dev); 2438 struct amdgpu_device *adev = drm_to_adev(ddev); 2439 struct amdgpu_pm_policy_attr *policy_attr; 2440 2441 policy_attr = 2442 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); 2443 2444 return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf); 2445 } 2446 2447 static ssize_t amdgpu_set_pm_policy_attr(struct device *dev, 2448 struct device_attribute *attr, 2449 const char *buf, size_t count) 2450 { 2451 struct drm_device *ddev = dev_get_drvdata(dev); 2452 struct amdgpu_device *adev = drm_to_adev(ddev); 2453 struct amdgpu_pm_policy_attr *policy_attr; 2454 int ret, num_params = 0; 2455 char delimiter[] = " \n\t"; 2456 char tmp_buf[128]; 2457 char *tmp, *param; 2458 long val; 2459 2460 count = min(count, sizeof(tmp_buf)); 2461 memcpy(tmp_buf, buf, count); 2462 tmp_buf[count - 1] = '\0'; 2463 tmp = tmp_buf; 2464 2465 tmp = skip_spaces(tmp); 2466 while ((param = strsep(&tmp, delimiter))) { 2467 if (!strlen(param)) { 2468 tmp = skip_spaces(tmp); 2469 continue; 2470 } 2471 ret = kstrtol(param, 0, &val); 2472 if (ret) 2473 return -EINVAL; 2474 num_params++; 2475 if (num_params > 1) 2476 return -EINVAL; 2477 } 2478 2479 if (num_params != 1) 2480 return -EINVAL; 2481 2482 policy_attr = 2483 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr); 2484 2485 ret = amdgpu_pm_get_access(adev); 2486 if (ret < 0) 2487 return ret; 2488 2489 ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val); 2490 2491 amdgpu_pm_put_access(adev); 2492 2493 if (ret) 2494 return ret; 2495 2496 return count; 2497 } 2498 2499 #define AMDGPU_PM_POLICY_ATTR(_name, _id) \ 2500 static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = { \ 2501 .dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \ 2502 amdgpu_set_pm_policy_attr), \ 2503 .id = PP_PM_POLICY_##_id, \ 2504 }; 2505 2506 #define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr 2507 2508 AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE) 2509 AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD) 2510 2511 static struct attribute *pm_policy_attrs[] = { 2512 &AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate), 2513 &AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd), 2514 NULL 2515 }; 2516 2517 static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj, 2518 struct attribute *attr, int n) 2519 { 2520 struct device *dev = kobj_to_dev(kobj); 2521 struct drm_device *ddev = dev_get_drvdata(dev); 2522 struct amdgpu_device *adev = drm_to_adev(ddev); 2523 struct amdgpu_pm_policy_attr *policy_attr; 2524 2525 policy_attr = 2526 container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr); 2527 2528 if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) == 2529 -ENOENT) 2530 return 0; 2531 2532 return attr->mode; 2533 } 2534 2535 const struct attribute_group amdgpu_pm_policy_attr_group = { 2536 .name = "pm_policy", 2537 .attrs = pm_policy_attrs, 2538 .is_visible = amdgpu_pm_policy_attr_visible, 2539 }; 2540 2541 static struct amdgpu_device_attr amdgpu_device_attrs[] = { 2542 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2543 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2544 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2545 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2546 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2547 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC), 2548 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2549 .attr_update = pp_dpm_clk_default_attr_update), 2550 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2551 .attr_update = pp_dpm_clk_default_attr_update), 2552 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2553 .attr_update = pp_dpm_clk_default_attr_update), 2554 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2555 .attr_update = pp_dpm_clk_default_attr_update), 2556 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2557 .attr_update = pp_dpm_clk_default_attr_update), 2558 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2559 .attr_update = pp_dpm_clk_default_attr_update), 2560 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2561 .attr_update = pp_dpm_clk_default_attr_update), 2562 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2563 .attr_update = pp_dpm_clk_default_attr_update), 2564 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2565 .attr_update = pp_dpm_dcefclk_attr_update), 2566 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, 2567 .attr_update = pp_dpm_clk_default_attr_update), 2568 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), 2569 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), 2570 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2571 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC, 2572 .attr_update = pp_od_clk_voltage_attr_update), 2573 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2574 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2575 AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2576 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), 2577 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2578 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2579 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2580 AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2581 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 2582 AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC, 2583 .attr_update = ss_power_attr_update), 2584 AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC, 2585 .attr_update = ss_power_attr_update), 2586 AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC, 2587 .attr_update = ss_bias_attr_update), 2588 AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC, 2589 .attr_update = amdgpu_pm_metrics_attr_update), 2590 }; 2591 2592 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 2593 uint32_t mask, enum amdgpu_device_attr_states *states) 2594 { 2595 struct device_attribute *dev_attr = &attr->dev_attr; 2596 enum amdgpu_device_attr_id attr_id = attr->attr_id; 2597 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 2598 2599 if (!(attr->flags & mask)) { 2600 *states = ATTR_STATE_UNSUPPORTED; 2601 return 0; 2602 } 2603 2604 if (DEVICE_ATTR_IS(mem_busy_percent)) { 2605 if ((adev->flags & AMD_IS_APU && 2606 gc_ver != IP_VERSION(9, 4, 3)) || 2607 gc_ver == IP_VERSION(9, 0, 1)) 2608 *states = ATTR_STATE_UNSUPPORTED; 2609 } else if (DEVICE_ATTR_IS(vcn_busy_percent)) { 2610 if (!(gc_ver == IP_VERSION(9, 3, 0) || 2611 gc_ver == IP_VERSION(10, 3, 1) || 2612 gc_ver == IP_VERSION(10, 3, 3) || 2613 gc_ver == IP_VERSION(10, 3, 6) || 2614 gc_ver == IP_VERSION(10, 3, 7) || 2615 gc_ver == IP_VERSION(11, 0, 0) || 2616 gc_ver == IP_VERSION(11, 0, 1) || 2617 gc_ver == IP_VERSION(11, 0, 2) || 2618 gc_ver == IP_VERSION(11, 0, 3) || 2619 gc_ver == IP_VERSION(11, 0, 4) || 2620 gc_ver == IP_VERSION(11, 5, 0) || 2621 gc_ver == IP_VERSION(11, 5, 1) || 2622 gc_ver == IP_VERSION(11, 5, 2) || 2623 gc_ver == IP_VERSION(11, 5, 3) || 2624 gc_ver == IP_VERSION(12, 0, 0) || 2625 gc_ver == IP_VERSION(12, 0, 1))) 2626 *states = ATTR_STATE_UNSUPPORTED; 2627 } else if (DEVICE_ATTR_IS(pcie_bw)) { 2628 /* PCIe Perf counters won't work on APU nodes */ 2629 if (adev->flags & AMD_IS_APU || 2630 !adev->asic_funcs->get_pcie_usage) 2631 *states = ATTR_STATE_UNSUPPORTED; 2632 } else if (DEVICE_ATTR_IS(unique_id)) { 2633 switch (gc_ver) { 2634 case IP_VERSION(9, 0, 1): 2635 case IP_VERSION(9, 4, 0): 2636 case IP_VERSION(9, 4, 1): 2637 case IP_VERSION(9, 4, 2): 2638 case IP_VERSION(9, 4, 3): 2639 case IP_VERSION(9, 4, 4): 2640 case IP_VERSION(9, 5, 0): 2641 case IP_VERSION(10, 3, 0): 2642 case IP_VERSION(11, 0, 0): 2643 case IP_VERSION(11, 0, 1): 2644 case IP_VERSION(11, 0, 2): 2645 case IP_VERSION(11, 0, 3): 2646 case IP_VERSION(12, 0, 0): 2647 case IP_VERSION(12, 0, 1): 2648 case IP_VERSION(12, 1, 0): 2649 *states = ATTR_STATE_SUPPORTED; 2650 break; 2651 default: 2652 *states = ATTR_STATE_UNSUPPORTED; 2653 } 2654 } else if (DEVICE_ATTR_IS(pp_features)) { 2655 if ((adev->flags & AMD_IS_APU && 2656 gc_ver != IP_VERSION(9, 4, 3)) || 2657 gc_ver < IP_VERSION(9, 0, 0)) 2658 *states = ATTR_STATE_UNSUPPORTED; 2659 } else if (DEVICE_ATTR_IS(gpu_metrics)) { 2660 if (gc_ver < IP_VERSION(9, 1, 0)) 2661 *states = ATTR_STATE_UNSUPPORTED; 2662 } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { 2663 if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) 2664 *states = ATTR_STATE_UNSUPPORTED; 2665 else if ((gc_ver == IP_VERSION(10, 3, 0) || 2666 gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev)) 2667 *states = ATTR_STATE_UNSUPPORTED; 2668 } else if (DEVICE_ATTR_IS(pp_mclk_od)) { 2669 if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP) 2670 *states = ATTR_STATE_UNSUPPORTED; 2671 } else if (DEVICE_ATTR_IS(pp_sclk_od)) { 2672 if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP) 2673 *states = ATTR_STATE_UNSUPPORTED; 2674 } else if (DEVICE_ATTR_IS(apu_thermal_cap)) { 2675 u32 limit; 2676 2677 if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == 2678 -EOPNOTSUPP) 2679 *states = ATTR_STATE_UNSUPPORTED; 2680 } else if (DEVICE_ATTR_IS(pp_table)) { 2681 int ret; 2682 char *tmp = NULL; 2683 2684 ret = amdgpu_dpm_get_pp_table(adev, &tmp); 2685 if (ret == -EOPNOTSUPP || !tmp) 2686 *states = ATTR_STATE_UNSUPPORTED; 2687 else 2688 *states = ATTR_STATE_SUPPORTED; 2689 } 2690 2691 switch (gc_ver) { 2692 case IP_VERSION(10, 3, 0): 2693 if (DEVICE_ATTR_IS(power_dpm_force_performance_level) && 2694 amdgpu_sriov_vf(adev)) { 2695 dev_attr->attr.mode &= ~0222; 2696 dev_attr->store = NULL; 2697 } 2698 break; 2699 default: 2700 break; 2701 } 2702 2703 return 0; 2704 } 2705 2706 2707 static int amdgpu_device_attr_create(struct amdgpu_device *adev, 2708 struct amdgpu_device_attr *attr, 2709 uint32_t mask, struct list_head *attr_list) 2710 { 2711 int ret = 0; 2712 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; 2713 struct amdgpu_device_attr_entry *attr_entry; 2714 struct device_attribute *dev_attr; 2715 const char *name; 2716 2717 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 2718 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; 2719 2720 if (!attr) 2721 return -EINVAL; 2722 2723 dev_attr = &attr->dev_attr; 2724 name = dev_attr->attr.name; 2725 2726 attr_update = attr->attr_update ? attr->attr_update : default_attr_update; 2727 2728 ret = attr_update(adev, attr, mask, &attr_states); 2729 if (ret) { 2730 dev_err(adev->dev, "failed to update device file %s, ret = %d\n", 2731 name, ret); 2732 return ret; 2733 } 2734 2735 if (attr_states == ATTR_STATE_UNSUPPORTED) 2736 return 0; 2737 2738 ret = device_create_file(adev->dev, dev_attr); 2739 if (ret) { 2740 dev_err(adev->dev, "failed to create device file %s, ret = %d\n", 2741 name, ret); 2742 } 2743 2744 attr_entry = kmalloc_obj(*attr_entry); 2745 if (!attr_entry) 2746 return -ENOMEM; 2747 2748 attr_entry->attr = attr; 2749 INIT_LIST_HEAD(&attr_entry->entry); 2750 2751 list_add_tail(&attr_entry->entry, attr_list); 2752 2753 return ret; 2754 } 2755 2756 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr) 2757 { 2758 struct device_attribute *dev_attr = &attr->dev_attr; 2759 2760 device_remove_file(adev->dev, dev_attr); 2761 } 2762 2763 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 2764 struct list_head *attr_list); 2765 2766 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev, 2767 struct amdgpu_device_attr *attrs, 2768 uint32_t counts, 2769 uint32_t mask, 2770 struct list_head *attr_list) 2771 { 2772 int ret = 0; 2773 uint32_t i = 0; 2774 2775 for (i = 0; i < counts; i++) { 2776 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list); 2777 if (ret) 2778 goto failed; 2779 } 2780 2781 return 0; 2782 2783 failed: 2784 amdgpu_device_attr_remove_groups(adev, attr_list); 2785 2786 return ret; 2787 } 2788 2789 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 2790 struct list_head *attr_list) 2791 { 2792 struct amdgpu_device_attr_entry *entry, *entry_tmp; 2793 2794 if (list_empty(attr_list)) 2795 return ; 2796 2797 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) { 2798 amdgpu_device_attr_remove(adev, entry->attr); 2799 list_del(&entry->entry); 2800 kfree(entry); 2801 } 2802 } 2803 2804 static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 2805 struct device_attribute *attr, 2806 char *buf) 2807 { 2808 struct amdgpu_device *adev = dev_get_drvdata(dev); 2809 int channel = to_sensor_dev_attr(attr)->index; 2810 int r, temp = 0; 2811 2812 if (channel >= PP_TEMP_MAX) 2813 return -EINVAL; 2814 2815 switch (channel) { 2816 case PP_TEMP_JUNCTION: 2817 /* get current junction temperature */ 2818 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 2819 (void *)&temp); 2820 break; 2821 case PP_TEMP_EDGE: 2822 /* get current edge temperature */ 2823 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 2824 (void *)&temp); 2825 break; 2826 case PP_TEMP_MEM: 2827 /* get current memory temperature */ 2828 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 2829 (void *)&temp); 2830 break; 2831 default: 2832 r = -EINVAL; 2833 break; 2834 } 2835 2836 if (r) 2837 return r; 2838 2839 return sysfs_emit(buf, "%d\n", temp); 2840 } 2841 2842 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 2843 struct device_attribute *attr, 2844 char *buf) 2845 { 2846 struct amdgpu_device *adev = dev_get_drvdata(dev); 2847 int hyst = to_sensor_dev_attr(attr)->index; 2848 int temp; 2849 2850 if (hyst) 2851 temp = adev->pm.dpm.thermal.min_temp; 2852 else 2853 temp = adev->pm.dpm.thermal.max_temp; 2854 2855 return sysfs_emit(buf, "%d\n", temp); 2856 } 2857 2858 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 2859 struct device_attribute *attr, 2860 char *buf) 2861 { 2862 struct amdgpu_device *adev = dev_get_drvdata(dev); 2863 int hyst = to_sensor_dev_attr(attr)->index; 2864 int temp; 2865 2866 if (hyst) 2867 temp = adev->pm.dpm.thermal.min_hotspot_temp; 2868 else 2869 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 2870 2871 return sysfs_emit(buf, "%d\n", temp); 2872 } 2873 2874 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 2875 struct device_attribute *attr, 2876 char *buf) 2877 { 2878 struct amdgpu_device *adev = dev_get_drvdata(dev); 2879 int hyst = to_sensor_dev_attr(attr)->index; 2880 int temp; 2881 2882 if (hyst) 2883 temp = adev->pm.dpm.thermal.min_mem_temp; 2884 else 2885 temp = adev->pm.dpm.thermal.max_mem_crit_temp; 2886 2887 return sysfs_emit(buf, "%d\n", temp); 2888 } 2889 2890 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 2891 struct device_attribute *attr, 2892 char *buf) 2893 { 2894 int channel = to_sensor_dev_attr(attr)->index; 2895 2896 if (channel >= PP_TEMP_MAX) 2897 return -EINVAL; 2898 2899 return sysfs_emit(buf, "%s\n", temp_label[channel].label); 2900 } 2901 2902 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 2903 struct device_attribute *attr, 2904 char *buf) 2905 { 2906 struct amdgpu_device *adev = dev_get_drvdata(dev); 2907 int channel = to_sensor_dev_attr(attr)->index; 2908 int temp = 0; 2909 2910 if (channel >= PP_TEMP_MAX) 2911 return -EINVAL; 2912 2913 switch (channel) { 2914 case PP_TEMP_JUNCTION: 2915 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 2916 break; 2917 case PP_TEMP_EDGE: 2918 temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 2919 break; 2920 case PP_TEMP_MEM: 2921 temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 2922 break; 2923 } 2924 2925 return sysfs_emit(buf, "%d\n", temp); 2926 } 2927 2928 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 2929 struct device_attribute *attr, 2930 char *buf) 2931 { 2932 struct amdgpu_device *adev = dev_get_drvdata(dev); 2933 u32 pwm_mode = 0; 2934 int ret; 2935 2936 ret = amdgpu_pm_get_access_if_active(adev); 2937 if (ret) 2938 return ret; 2939 2940 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 2941 2942 amdgpu_pm_put_access(adev); 2943 2944 if (ret) 2945 return -EINVAL; 2946 2947 return sysfs_emit(buf, "%u\n", pwm_mode); 2948 } 2949 2950 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 2951 struct device_attribute *attr, 2952 const char *buf, 2953 size_t count) 2954 { 2955 struct amdgpu_device *adev = dev_get_drvdata(dev); 2956 int err, ret; 2957 u32 pwm_mode; 2958 int value; 2959 2960 err = kstrtoint(buf, 10, &value); 2961 if (err) 2962 return err; 2963 2964 if (value == 0) 2965 pwm_mode = AMD_FAN_CTRL_NONE; 2966 else if (value == 1) 2967 pwm_mode = AMD_FAN_CTRL_MANUAL; 2968 else if (value == 2) 2969 pwm_mode = AMD_FAN_CTRL_AUTO; 2970 else 2971 return -EINVAL; 2972 2973 ret = amdgpu_pm_get_access(adev); 2974 if (ret < 0) 2975 return ret; 2976 2977 ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2978 2979 amdgpu_pm_put_access(adev); 2980 2981 if (ret) 2982 return -EINVAL; 2983 2984 return count; 2985 } 2986 2987 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 2988 struct device_attribute *attr, 2989 char *buf) 2990 { 2991 return sysfs_emit(buf, "%i\n", 0); 2992 } 2993 2994 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 2995 struct device_attribute *attr, 2996 char *buf) 2997 { 2998 return sysfs_emit(buf, "%i\n", 255); 2999 } 3000 3001 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 3002 struct device_attribute *attr, 3003 const char *buf, size_t count) 3004 { 3005 struct amdgpu_device *adev = dev_get_drvdata(dev); 3006 int err; 3007 u32 value; 3008 u32 pwm_mode; 3009 3010 err = kstrtou32(buf, 10, &value); 3011 if (err) 3012 return err; 3013 3014 err = amdgpu_pm_get_access(adev); 3015 if (err < 0) 3016 return err; 3017 3018 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 3019 if (err) 3020 goto out; 3021 3022 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 3023 pr_info("manual fan speed control should be enabled first\n"); 3024 err = -EINVAL; 3025 goto out; 3026 } 3027 3028 err = amdgpu_dpm_set_fan_speed_pwm(adev, value); 3029 3030 out: 3031 amdgpu_pm_put_access(adev); 3032 3033 if (err) 3034 return err; 3035 3036 return count; 3037 } 3038 3039 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 3040 struct device_attribute *attr, 3041 char *buf) 3042 { 3043 struct amdgpu_device *adev = dev_get_drvdata(dev); 3044 int err; 3045 u32 speed = 0; 3046 3047 err = amdgpu_pm_get_access_if_active(adev); 3048 if (err) 3049 return err; 3050 3051 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); 3052 3053 amdgpu_pm_put_access(adev); 3054 3055 if (err) 3056 return err; 3057 3058 return sysfs_emit(buf, "%i\n", speed); 3059 } 3060 3061 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 3062 struct device_attribute *attr, 3063 char *buf) 3064 { 3065 struct amdgpu_device *adev = dev_get_drvdata(dev); 3066 int err; 3067 u32 speed = 0; 3068 3069 err = amdgpu_pm_get_access_if_active(adev); 3070 if (err) 3071 return err; 3072 3073 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 3074 3075 amdgpu_pm_put_access(adev); 3076 3077 if (err) 3078 return err; 3079 3080 return sysfs_emit(buf, "%i\n", speed); 3081 } 3082 3083 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 3084 struct device_attribute *attr, 3085 char *buf) 3086 { 3087 struct amdgpu_device *adev = dev_get_drvdata(dev); 3088 u32 min_rpm = 0; 3089 int r; 3090 3091 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 3092 (void *)&min_rpm); 3093 3094 if (r) 3095 return r; 3096 3097 return sysfs_emit(buf, "%d\n", min_rpm); 3098 } 3099 3100 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 3101 struct device_attribute *attr, 3102 char *buf) 3103 { 3104 struct amdgpu_device *adev = dev_get_drvdata(dev); 3105 u32 max_rpm = 0; 3106 int r; 3107 3108 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 3109 (void *)&max_rpm); 3110 3111 if (r) 3112 return r; 3113 3114 return sysfs_emit(buf, "%d\n", max_rpm); 3115 } 3116 3117 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 3118 struct device_attribute *attr, 3119 char *buf) 3120 { 3121 struct amdgpu_device *adev = dev_get_drvdata(dev); 3122 int err; 3123 u32 rpm = 0; 3124 3125 err = amdgpu_pm_get_access_if_active(adev); 3126 if (err) 3127 return err; 3128 3129 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 3130 3131 amdgpu_pm_put_access(adev); 3132 3133 if (err) 3134 return err; 3135 3136 return sysfs_emit(buf, "%i\n", rpm); 3137 } 3138 3139 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, 3140 struct device_attribute *attr, 3141 const char *buf, size_t count) 3142 { 3143 struct amdgpu_device *adev = dev_get_drvdata(dev); 3144 int err; 3145 u32 value; 3146 u32 pwm_mode; 3147 3148 err = kstrtou32(buf, 10, &value); 3149 if (err) 3150 return err; 3151 3152 err = amdgpu_pm_get_access(adev); 3153 if (err < 0) 3154 return err; 3155 3156 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 3157 if (err) 3158 goto out; 3159 3160 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 3161 err = -ENODATA; 3162 goto out; 3163 } 3164 3165 err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 3166 3167 out: 3168 amdgpu_pm_put_access(adev); 3169 3170 if (err) 3171 return err; 3172 3173 return count; 3174 } 3175 3176 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 3177 struct device_attribute *attr, 3178 char *buf) 3179 { 3180 struct amdgpu_device *adev = dev_get_drvdata(dev); 3181 u32 pwm_mode = 0; 3182 int ret; 3183 3184 ret = amdgpu_pm_get_access_if_active(adev); 3185 if (ret) 3186 return ret; 3187 3188 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 3189 3190 amdgpu_pm_put_access(adev); 3191 3192 if (ret) 3193 return -EINVAL; 3194 3195 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); 3196 } 3197 3198 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 3199 struct device_attribute *attr, 3200 const char *buf, 3201 size_t count) 3202 { 3203 struct amdgpu_device *adev = dev_get_drvdata(dev); 3204 int err; 3205 int value; 3206 u32 pwm_mode; 3207 3208 err = kstrtoint(buf, 10, &value); 3209 if (err) 3210 return err; 3211 3212 if (value == 0) 3213 pwm_mode = AMD_FAN_CTRL_AUTO; 3214 else if (value == 1) 3215 pwm_mode = AMD_FAN_CTRL_MANUAL; 3216 else 3217 return -EINVAL; 3218 3219 err = amdgpu_pm_get_access(adev); 3220 if (err < 0) 3221 return err; 3222 3223 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 3224 3225 amdgpu_pm_put_access(adev); 3226 3227 if (err) 3228 return -EINVAL; 3229 3230 return count; 3231 } 3232 3233 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 3234 struct device_attribute *attr, 3235 char *buf) 3236 { 3237 struct amdgpu_device *adev = dev_get_drvdata(dev); 3238 u32 vddgfx; 3239 int r; 3240 3241 /* get the voltage */ 3242 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, 3243 (void *)&vddgfx); 3244 if (r) 3245 return r; 3246 3247 return sysfs_emit(buf, "%d\n", vddgfx); 3248 } 3249 3250 static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev, 3251 struct device_attribute *attr, 3252 char *buf) 3253 { 3254 struct amdgpu_device *adev = dev_get_drvdata(dev); 3255 u32 vddboard; 3256 int r; 3257 3258 /* get the voltage */ 3259 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, 3260 (void *)&vddboard); 3261 if (r) 3262 return r; 3263 3264 return sysfs_emit(buf, "%d\n", vddboard); 3265 } 3266 3267 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 3268 struct device_attribute *attr, 3269 char *buf) 3270 { 3271 return sysfs_emit(buf, "vddgfx\n"); 3272 } 3273 3274 static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev, 3275 struct device_attribute *attr, 3276 char *buf) 3277 { 3278 return sysfs_emit(buf, "vddboard\n"); 3279 } 3280 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 3281 struct device_attribute *attr, 3282 char *buf) 3283 { 3284 struct amdgpu_device *adev = dev_get_drvdata(dev); 3285 u32 vddnb; 3286 int r; 3287 3288 /* only APUs have vddnb */ 3289 if (!(adev->flags & AMD_IS_APU)) 3290 return -EINVAL; 3291 3292 /* get the voltage */ 3293 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, 3294 (void *)&vddnb); 3295 if (r) 3296 return r; 3297 3298 return sysfs_emit(buf, "%d\n", vddnb); 3299 } 3300 3301 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 3302 struct device_attribute *attr, 3303 char *buf) 3304 { 3305 return sysfs_emit(buf, "vddnb\n"); 3306 } 3307 3308 static int amdgpu_hwmon_get_power(struct device *dev, 3309 enum amd_pp_sensors sensor) 3310 { 3311 struct amdgpu_device *adev = dev_get_drvdata(dev); 3312 unsigned int uw; 3313 u32 query = 0; 3314 int r; 3315 3316 r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query); 3317 if (r) 3318 return r; 3319 3320 /* convert to microwatts */ 3321 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 3322 3323 return uw; 3324 } 3325 3326 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 3327 struct device_attribute *attr, 3328 char *buf) 3329 { 3330 ssize_t val; 3331 3332 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER); 3333 if (val < 0) 3334 return val; 3335 3336 return sysfs_emit(buf, "%zd\n", val); 3337 } 3338 3339 static ssize_t amdgpu_hwmon_show_power_input(struct device *dev, 3340 struct device_attribute *attr, 3341 char *buf) 3342 { 3343 ssize_t val; 3344 3345 val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER); 3346 if (val < 0) 3347 return val; 3348 3349 return sysfs_emit(buf, "%zd\n", val); 3350 } 3351 3352 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, 3353 struct device_attribute *attr, 3354 char *buf, 3355 enum pp_power_limit_level pp_limit_level) 3356 { 3357 struct amdgpu_device *adev = dev_get_drvdata(dev); 3358 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index; 3359 uint32_t limit; 3360 ssize_t size; 3361 int r; 3362 3363 r = amdgpu_pm_get_access_if_active(adev); 3364 if (r) 3365 return r; 3366 3367 r = amdgpu_dpm_get_power_limit(adev, &limit, 3368 pp_limit_level, power_type); 3369 3370 if (!r) 3371 size = sysfs_emit(buf, "%u\n", limit * 1000000); 3372 else 3373 size = sysfs_emit(buf, "\n"); 3374 3375 amdgpu_pm_put_access(adev); 3376 3377 return size; 3378 } 3379 3380 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 3381 struct device_attribute *attr, 3382 char *buf) 3383 { 3384 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN); 3385 } 3386 3387 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 3388 struct device_attribute *attr, 3389 char *buf) 3390 { 3391 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX); 3392 3393 } 3394 3395 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 3396 struct device_attribute *attr, 3397 char *buf) 3398 { 3399 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT); 3400 3401 } 3402 3403 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev, 3404 struct device_attribute *attr, 3405 char *buf) 3406 { 3407 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT); 3408 3409 } 3410 3411 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, 3412 struct device_attribute *attr, 3413 char *buf) 3414 { 3415 struct amdgpu_device *adev = dev_get_drvdata(dev); 3416 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 3417 3418 if (gc_ver == IP_VERSION(10, 3, 1)) 3419 return sysfs_emit(buf, "%s\n", 3420 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? 3421 "fastPPT" : "slowPPT"); 3422 else 3423 return sysfs_emit(buf, "%s\n", 3424 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? 3425 "PPT1" : "PPT"); 3426 } 3427 3428 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 3429 struct device_attribute *attr, 3430 const char *buf, 3431 size_t count) 3432 { 3433 struct amdgpu_device *adev = dev_get_drvdata(dev); 3434 int limit_type = to_sensor_dev_attr(attr)->index; 3435 int err; 3436 u32 value; 3437 3438 err = kstrtou32(buf, 10, &value); 3439 if (err) 3440 return err; 3441 3442 value = value / 1000000; /* convert to Watt */ 3443 3444 err = amdgpu_pm_get_access(adev); 3445 if (err < 0) 3446 return err; 3447 3448 err = amdgpu_dpm_set_power_limit(adev, limit_type, value); 3449 3450 amdgpu_pm_put_access(adev); 3451 3452 if (err) 3453 return err; 3454 3455 return count; 3456 } 3457 3458 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 3459 struct device_attribute *attr, 3460 char *buf) 3461 { 3462 struct amdgpu_device *adev = dev_get_drvdata(dev); 3463 uint32_t sclk; 3464 int r; 3465 3466 /* get the sclk */ 3467 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 3468 (void *)&sclk); 3469 if (r) 3470 return r; 3471 3472 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000); 3473 } 3474 3475 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, 3476 struct device_attribute *attr, 3477 char *buf) 3478 { 3479 return sysfs_emit(buf, "sclk\n"); 3480 } 3481 3482 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 3483 struct device_attribute *attr, 3484 char *buf) 3485 { 3486 struct amdgpu_device *adev = dev_get_drvdata(dev); 3487 uint32_t mclk; 3488 int r; 3489 3490 /* get the sclk */ 3491 r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 3492 (void *)&mclk); 3493 if (r) 3494 return r; 3495 3496 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000); 3497 } 3498 3499 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 3500 struct device_attribute *attr, 3501 char *buf) 3502 { 3503 return sysfs_emit(buf, "mclk\n"); 3504 } 3505 3506 /** 3507 * DOC: hwmon 3508 * 3509 * The amdgpu driver exposes the following sensor interfaces: 3510 * 3511 * - GPU temperature (via the on-die sensor) 3512 * 3513 * - GPU voltage 3514 * 3515 * - Northbridge voltage (APUs only) 3516 * 3517 * - GPU power 3518 * 3519 * - GPU fan 3520 * 3521 * - GPU gfx/compute engine clock 3522 * 3523 * - GPU memory clock (dGPU only) 3524 * 3525 * hwmon interfaces for GPU temperature: 3526 * 3527 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius 3528 * - temp2_input and temp3_input are supported on SOC15 dGPUs only 3529 * 3530 * - temp[1-3]_label: temperature channel label 3531 * - temp2_label and temp3_label are supported on SOC15 dGPUs only 3532 * 3533 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 3534 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 3535 * 3536 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 3537 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only 3538 * 3539 * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius 3540 * - these are supported on SOC15 dGPUs only 3541 * 3542 * hwmon interfaces for GPU voltage: 3543 * 3544 * - in0_input: the voltage on the GPU in millivolts 3545 * 3546 * - in1_input: the voltage on the Northbridge in millivolts 3547 * 3548 * hwmon interfaces for GPU power: 3549 * 3550 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU. 3551 * 3552 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU. 3553 * 3554 * - power1_cap_min: minimum cap supported in microWatts 3555 * 3556 * - power1_cap_max: maximum cap supported in microWatts 3557 * 3558 * - power1_cap: selected power cap in microWatts 3559 * 3560 * hwmon interfaces for GPU fan: 3561 * 3562 * - pwm1: pulse width modulation fan level (0-255) 3563 * 3564 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 3565 * 3566 * - pwm1_min: pulse width modulation fan control minimum level (0) 3567 * 3568 * - pwm1_max: pulse width modulation fan control maximum level (255) 3569 * 3570 * - fan1_min: a minimum value Unit: revolution/min (RPM) 3571 * 3572 * - fan1_max: a maximum value Unit: revolution/max (RPM) 3573 * 3574 * - fan1_input: fan speed in RPM 3575 * 3576 * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) 3577 * 3578 * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable 3579 * 3580 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time. 3581 * That will get the former one overridden. 3582 * 3583 * hwmon interfaces for GPU clocks: 3584 * 3585 * - freq1_input: the gfx/compute clock in hertz 3586 * 3587 * - freq2_input: the memory clock in hertz 3588 * 3589 * You can use hwmon tools like sensors to view this information on your system. 3590 * 3591 */ 3592 3593 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 3594 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 3595 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 3596 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 3597 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 3598 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 3599 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 3600 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 3601 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 3602 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 3603 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 3604 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 3605 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 3606 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 3607 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 3608 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 3609 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 3610 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 3611 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 3612 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 3613 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 3614 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 3615 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 3616 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 3617 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 3618 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 3619 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 3620 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 3621 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0); 3622 static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0); 3623 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 3624 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0); 3625 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 3626 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 3627 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 3628 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0); 3629 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0); 3630 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1); 3631 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1); 3632 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1); 3633 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1); 3634 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1); 3635 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 3636 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 3637 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 3638 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 3639 3640 static struct attribute *hwmon_attributes[] = { 3641 &sensor_dev_attr_temp1_input.dev_attr.attr, 3642 &sensor_dev_attr_temp1_crit.dev_attr.attr, 3643 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 3644 &sensor_dev_attr_temp2_input.dev_attr.attr, 3645 &sensor_dev_attr_temp2_crit.dev_attr.attr, 3646 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 3647 &sensor_dev_attr_temp3_input.dev_attr.attr, 3648 &sensor_dev_attr_temp3_crit.dev_attr.attr, 3649 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 3650 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 3651 &sensor_dev_attr_temp2_emergency.dev_attr.attr, 3652 &sensor_dev_attr_temp3_emergency.dev_attr.attr, 3653 &sensor_dev_attr_temp1_label.dev_attr.attr, 3654 &sensor_dev_attr_temp2_label.dev_attr.attr, 3655 &sensor_dev_attr_temp3_label.dev_attr.attr, 3656 &sensor_dev_attr_pwm1.dev_attr.attr, 3657 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 3658 &sensor_dev_attr_pwm1_min.dev_attr.attr, 3659 &sensor_dev_attr_pwm1_max.dev_attr.attr, 3660 &sensor_dev_attr_fan1_input.dev_attr.attr, 3661 &sensor_dev_attr_fan1_min.dev_attr.attr, 3662 &sensor_dev_attr_fan1_max.dev_attr.attr, 3663 &sensor_dev_attr_fan1_target.dev_attr.attr, 3664 &sensor_dev_attr_fan1_enable.dev_attr.attr, 3665 &sensor_dev_attr_in0_input.dev_attr.attr, 3666 &sensor_dev_attr_in0_label.dev_attr.attr, 3667 &sensor_dev_attr_in1_input.dev_attr.attr, 3668 &sensor_dev_attr_in1_label.dev_attr.attr, 3669 &sensor_dev_attr_in2_input.dev_attr.attr, 3670 &sensor_dev_attr_in2_label.dev_attr.attr, 3671 &sensor_dev_attr_power1_average.dev_attr.attr, 3672 &sensor_dev_attr_power1_input.dev_attr.attr, 3673 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 3674 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 3675 &sensor_dev_attr_power1_cap.dev_attr.attr, 3676 &sensor_dev_attr_power1_cap_default.dev_attr.attr, 3677 &sensor_dev_attr_power1_label.dev_attr.attr, 3678 &sensor_dev_attr_power2_cap_max.dev_attr.attr, 3679 &sensor_dev_attr_power2_cap_min.dev_attr.attr, 3680 &sensor_dev_attr_power2_cap.dev_attr.attr, 3681 &sensor_dev_attr_power2_cap_default.dev_attr.attr, 3682 &sensor_dev_attr_power2_label.dev_attr.attr, 3683 &sensor_dev_attr_freq1_input.dev_attr.attr, 3684 &sensor_dev_attr_freq1_label.dev_attr.attr, 3685 &sensor_dev_attr_freq2_input.dev_attr.attr, 3686 &sensor_dev_attr_freq2_label.dev_attr.attr, 3687 NULL 3688 }; 3689 3690 static umode_t hwmon_attributes_visible(struct kobject *kobj, 3691 struct attribute *attr, int index) 3692 { 3693 struct device *dev = kobj_to_dev(kobj); 3694 struct amdgpu_device *adev = dev_get_drvdata(dev); 3695 umode_t effective_mode = attr->mode; 3696 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 3697 uint32_t tmp; 3698 3699 /* under pp one vf mode manage of hwmon attributes is not supported */ 3700 if (amdgpu_sriov_is_pp_one_vf(adev)) 3701 effective_mode &= ~S_IWUSR; 3702 3703 /* Skip fan attributes if fan is not present */ 3704 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3705 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3706 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3707 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3708 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3709 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3710 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3711 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3712 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3713 return 0; 3714 3715 /* Skip fan attributes on APU */ 3716 if ((adev->flags & AMD_IS_APU) && 3717 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3718 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3719 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3720 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3721 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3722 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3723 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3724 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3725 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3726 return 0; 3727 3728 /* Skip crit temp on APU */ 3729 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) || 3730 amdgpu_is_multi_aid(adev)) && 3731 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3732 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 3733 return 0; 3734 3735 /* Skip limit attributes if DPM is not enabled */ 3736 if (!adev->pm.dpm_enabled && 3737 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3738 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 3739 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3740 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3741 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3742 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3743 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3744 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3745 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3746 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3747 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3748 return 0; 3749 3750 /* mask fan attributes if we have no bindings for this asic to expose */ 3751 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && 3752 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 3753 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) && 3754 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 3755 effective_mode &= ~S_IRUGO; 3756 3757 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && 3758 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 3759 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) && 3760 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 3761 effective_mode &= ~S_IWUSR; 3762 3763 /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */ 3764 if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 3765 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || 3766 attr == &sensor_dev_attr_power1_cap.dev_attr.attr || 3767 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) { 3768 if (adev->family == AMDGPU_FAMILY_SI || 3769 ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) && 3770 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) || 3771 (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3))) 3772 return 0; 3773 } 3774 3775 if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr && 3776 amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT)) 3777 effective_mode |= S_IWUSR; 3778 3779 /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ 3780 if (((adev->family == AMDGPU_FAMILY_SI) || 3781 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) && 3782 (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) 3783 return 0; 3784 3785 /* not all products support both average and instantaneous */ 3786 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr && 3787 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, 3788 (void *)&tmp) == -EOPNOTSUPP) 3789 return 0; 3790 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr && 3791 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, 3792 (void *)&tmp) == -EOPNOTSUPP) 3793 return 0; 3794 3795 /* hide max/min values if we can't both query and manage the fan */ 3796 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && 3797 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && 3798 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) && 3799 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) && 3800 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3801 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 3802 return 0; 3803 3804 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) && 3805 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) && 3806 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3807 attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 3808 return 0; 3809 3810 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 3811 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */ 3812 amdgpu_is_multi_aid(adev)) && 3813 (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 3814 attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 3815 return 0; 3816 3817 /* only APUs other than gc 9,4,3 have vddnb */ 3818 if ((!(adev->flags & AMD_IS_APU) || 3819 amdgpu_is_multi_aid(adev)) && 3820 (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 3821 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 3822 return 0; 3823 3824 /* only few boards support vddboard */ 3825 if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr || 3826 attr == &sensor_dev_attr_in2_label.dev_attr.attr) && 3827 amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, 3828 (void *)&tmp) == -EOPNOTSUPP) 3829 return 0; 3830 3831 /* no mclk on APUs other than gc 9,4,3*/ 3832 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) && 3833 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 3834 attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 3835 return 0; 3836 3837 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) && 3838 (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) && 3839 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 3840 attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 3841 attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 3842 attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 3843 attr == &sensor_dev_attr_temp3_label.dev_attr.attr || 3844 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr)) 3845 return 0; 3846 3847 /* hotspot temperature for gc 9,4,3*/ 3848 if (amdgpu_is_multi_aid(adev)) { 3849 if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr || 3850 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 3851 attr == &sensor_dev_attr_temp1_label.dev_attr.attr) 3852 return 0; 3853 3854 if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 3855 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr) 3856 return attr->mode; 3857 } 3858 3859 /* only SOC15 dGPUs support hotspot and mem temperatures */ 3860 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) && 3861 (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 3862 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 3863 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 3864 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 3865 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)) 3866 return 0; 3867 3868 /* only a few GPUs have fast PPT limit and power labels */ 3869 if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || 3870 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || 3871 attr == &sensor_dev_attr_power2_cap.dev_attr.attr || 3872 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || 3873 attr == &sensor_dev_attr_power2_label.dev_attr.attr) && 3874 (amdgpu_dpm_get_power_limit(adev, &tmp, 3875 PP_PWR_LIMIT_MAX, 3876 PP_PWR_TYPE_FAST) == -EOPNOTSUPP)) 3877 return 0; 3878 3879 return effective_mode; 3880 } 3881 3882 static const struct attribute_group hwmon_attrgroup = { 3883 .attrs = hwmon_attributes, 3884 .is_visible = hwmon_attributes_visible, 3885 }; 3886 3887 static const struct attribute_group *hwmon_groups[] = { 3888 &hwmon_attrgroup, 3889 NULL 3890 }; 3891 3892 static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev, 3893 enum pp_clock_type od_type, 3894 char *buf) 3895 { 3896 int size = 0; 3897 int ret; 3898 3899 ret = amdgpu_pm_get_access_if_active(adev); 3900 if (ret) 3901 return ret; 3902 3903 ret = amdgpu_dpm_emit_clock_levels(adev, od_type, buf, &size); 3904 if (ret) 3905 return ret; 3906 if (size == 0) 3907 size = sysfs_emit(buf, "\n"); 3908 3909 amdgpu_pm_put_access(adev); 3910 3911 return size; 3912 } 3913 3914 static int parse_input_od_command_lines(const char *buf, 3915 size_t count, 3916 u32 *type, 3917 long *params, 3918 uint32_t *num_of_params) 3919 { 3920 const char delimiter[3] = {' ', '\n', '\0'}; 3921 uint32_t parameter_size = 0; 3922 char buf_cpy[128] = {0}; 3923 char *tmp_str, *sub_str; 3924 int ret; 3925 3926 if (count > sizeof(buf_cpy) - 1) 3927 return -EINVAL; 3928 3929 memcpy(buf_cpy, buf, count); 3930 tmp_str = buf_cpy; 3931 3932 /* skip heading spaces */ 3933 while (isspace(*tmp_str)) 3934 tmp_str++; 3935 3936 switch (*tmp_str) { 3937 case 'c': 3938 *type = PP_OD_COMMIT_DPM_TABLE; 3939 return 0; 3940 case 'r': 3941 params[parameter_size] = *type; 3942 *num_of_params = 1; 3943 *type = PP_OD_RESTORE_DEFAULT_TABLE; 3944 return 0; 3945 default: 3946 break; 3947 } 3948 3949 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { 3950 if (strlen(sub_str) == 0) 3951 continue; 3952 3953 ret = kstrtol(sub_str, 0, ¶ms[parameter_size]); 3954 if (ret) 3955 return -EINVAL; 3956 parameter_size++; 3957 3958 if (!tmp_str) 3959 break; 3960 3961 while (isspace(*tmp_str)) 3962 tmp_str++; 3963 } 3964 3965 *num_of_params = parameter_size; 3966 3967 return 0; 3968 } 3969 3970 static int 3971 amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev, 3972 enum PP_OD_DPM_TABLE_COMMAND cmd_type, 3973 const char *in_buf, 3974 size_t count) 3975 { 3976 uint32_t parameter_size = 0; 3977 long parameter[64]; 3978 int ret; 3979 3980 ret = parse_input_od_command_lines(in_buf, 3981 count, 3982 &cmd_type, 3983 parameter, 3984 ¶meter_size); 3985 if (ret) 3986 return ret; 3987 3988 ret = amdgpu_pm_get_access(adev); 3989 if (ret < 0) 3990 return ret; 3991 3992 ret = amdgpu_dpm_odn_edit_dpm_table(adev, 3993 cmd_type, 3994 parameter, 3995 parameter_size); 3996 if (ret) 3997 goto err_out; 3998 3999 if (cmd_type == PP_OD_COMMIT_DPM_TABLE) { 4000 ret = amdgpu_dpm_dispatch_task(adev, 4001 AMD_PP_TASK_READJUST_POWER_STATE, 4002 NULL); 4003 if (ret) 4004 goto err_out; 4005 } 4006 4007 amdgpu_pm_put_access(adev); 4008 4009 return count; 4010 4011 err_out: 4012 amdgpu_pm_put_access(adev); 4013 4014 return ret; 4015 } 4016 4017 /** 4018 * DOC: fan_curve 4019 * 4020 * The amdgpu driver provides a sysfs API for checking and adjusting the fan 4021 * control curve line. 4022 * 4023 * Reading back the file shows you the current settings(temperature in Celsius 4024 * degree and fan speed in pwm) applied to every anchor point of the curve line 4025 * and their permitted ranges if changable. 4026 * 4027 * Writing a desired string(with the format like "anchor_point_index temperature 4028 * fan_speed_in_pwm") to the file, change the settings for the specific anchor 4029 * point accordingly. 4030 * 4031 * When you have finished the editing, write "c" (commit) to the file to commit 4032 * your changes. 4033 * 4034 * If you want to reset to the default value, write "r" (reset) to the file to 4035 * reset them 4036 * 4037 * There are two fan control modes supported: auto and manual. With auto mode, 4038 * PMFW handles the fan speed control(how fan speed reacts to ASIC temperature). 4039 * While with manual mode, users can set their own fan curve line as what 4040 * described here. Normally the ASIC is booted up with auto mode. Any 4041 * settings via this interface will switch the fan control to manual mode 4042 * implicitly. 4043 */ 4044 static ssize_t fan_curve_show(struct kobject *kobj, 4045 struct kobj_attribute *attr, 4046 char *buf) 4047 { 4048 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4049 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4050 4051 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf); 4052 } 4053 4054 static ssize_t fan_curve_store(struct kobject *kobj, 4055 struct kobj_attribute *attr, 4056 const char *buf, 4057 size_t count) 4058 { 4059 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4060 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4061 4062 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4063 PP_OD_EDIT_FAN_CURVE, 4064 buf, 4065 count); 4066 } 4067 4068 static umode_t fan_curve_visible(struct amdgpu_device *adev) 4069 { 4070 umode_t umode = 0000; 4071 4072 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE) 4073 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4074 4075 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET) 4076 umode |= S_IWUSR; 4077 4078 return umode; 4079 } 4080 4081 /** 4082 * DOC: acoustic_limit_rpm_threshold 4083 * 4084 * The amdgpu driver provides a sysfs API for checking and adjusting the 4085 * acoustic limit in RPM for fan control. 4086 * 4087 * Reading back the file shows you the current setting and the permitted 4088 * ranges if changable. 4089 * 4090 * Writing an integer to the file, change the setting accordingly. 4091 * 4092 * When you have finished the editing, write "c" (commit) to the file to commit 4093 * your changes. 4094 * 4095 * If you want to reset to the default value, write "r" (reset) to the file to 4096 * reset them 4097 * 4098 * This setting works under auto fan control mode only. It adjusts the PMFW's 4099 * behavior about the maximum speed in RPM the fan can spin. Setting via this 4100 * interface will switch the fan control to auto mode implicitly. 4101 */ 4102 static ssize_t acoustic_limit_threshold_show(struct kobject *kobj, 4103 struct kobj_attribute *attr, 4104 char *buf) 4105 { 4106 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4107 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4108 4109 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf); 4110 } 4111 4112 static ssize_t acoustic_limit_threshold_store(struct kobject *kobj, 4113 struct kobj_attribute *attr, 4114 const char *buf, 4115 size_t count) 4116 { 4117 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4118 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4119 4120 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4121 PP_OD_EDIT_ACOUSTIC_LIMIT, 4122 buf, 4123 count); 4124 } 4125 4126 static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev) 4127 { 4128 umode_t umode = 0000; 4129 4130 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE) 4131 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4132 4133 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET) 4134 umode |= S_IWUSR; 4135 4136 return umode; 4137 } 4138 4139 /** 4140 * DOC: acoustic_target_rpm_threshold 4141 * 4142 * The amdgpu driver provides a sysfs API for checking and adjusting the 4143 * acoustic target in RPM for fan control. 4144 * 4145 * Reading back the file shows you the current setting and the permitted 4146 * ranges if changable. 4147 * 4148 * Writing an integer to the file, change the setting accordingly. 4149 * 4150 * When you have finished the editing, write "c" (commit) to the file to commit 4151 * your changes. 4152 * 4153 * If you want to reset to the default value, write "r" (reset) to the file to 4154 * reset them 4155 * 4156 * This setting works under auto fan control mode only. It can co-exist with 4157 * other settings which can work also under auto mode. It adjusts the PMFW's 4158 * behavior about the maximum speed in RPM the fan can spin when ASIC 4159 * temperature is not greater than target temperature. Setting via this 4160 * interface will switch the fan control to auto mode implicitly. 4161 */ 4162 static ssize_t acoustic_target_threshold_show(struct kobject *kobj, 4163 struct kobj_attribute *attr, 4164 char *buf) 4165 { 4166 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4167 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4168 4169 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf); 4170 } 4171 4172 static ssize_t acoustic_target_threshold_store(struct kobject *kobj, 4173 struct kobj_attribute *attr, 4174 const char *buf, 4175 size_t count) 4176 { 4177 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4178 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4179 4180 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4181 PP_OD_EDIT_ACOUSTIC_TARGET, 4182 buf, 4183 count); 4184 } 4185 4186 static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev) 4187 { 4188 umode_t umode = 0000; 4189 4190 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE) 4191 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4192 4193 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET) 4194 umode |= S_IWUSR; 4195 4196 return umode; 4197 } 4198 4199 /** 4200 * DOC: fan_target_temperature 4201 * 4202 * The amdgpu driver provides a sysfs API for checking and adjusting the 4203 * target tempeature in Celsius degree for fan control. 4204 * 4205 * Reading back the file shows you the current setting and the permitted 4206 * ranges if changable. 4207 * 4208 * Writing an integer to the file, change the setting accordingly. 4209 * 4210 * When you have finished the editing, write "c" (commit) to the file to commit 4211 * your changes. 4212 * 4213 * If you want to reset to the default value, write "r" (reset) to the file to 4214 * reset them 4215 * 4216 * This setting works under auto fan control mode only. It can co-exist with 4217 * other settings which can work also under auto mode. Paring with the 4218 * acoustic_target_rpm_threshold setting, they define the maximum speed in 4219 * RPM the fan can spin when ASIC temperature is not greater than target 4220 * temperature. Setting via this interface will switch the fan control to 4221 * auto mode implicitly. 4222 */ 4223 static ssize_t fan_target_temperature_show(struct kobject *kobj, 4224 struct kobj_attribute *attr, 4225 char *buf) 4226 { 4227 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4228 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4229 4230 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf); 4231 } 4232 4233 static ssize_t fan_target_temperature_store(struct kobject *kobj, 4234 struct kobj_attribute *attr, 4235 const char *buf, 4236 size_t count) 4237 { 4238 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4239 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4240 4241 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4242 PP_OD_EDIT_FAN_TARGET_TEMPERATURE, 4243 buf, 4244 count); 4245 } 4246 4247 static umode_t fan_target_temperature_visible(struct amdgpu_device *adev) 4248 { 4249 umode_t umode = 0000; 4250 4251 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE) 4252 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4253 4254 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET) 4255 umode |= S_IWUSR; 4256 4257 return umode; 4258 } 4259 4260 /** 4261 * DOC: fan_minimum_pwm 4262 * 4263 * The amdgpu driver provides a sysfs API for checking and adjusting the 4264 * minimum fan speed in PWM. 4265 * 4266 * Reading back the file shows you the current setting and the permitted 4267 * ranges if changable. 4268 * 4269 * Writing an integer to the file, change the setting accordingly. 4270 * 4271 * When you have finished the editing, write "c" (commit) to the file to commit 4272 * your changes. 4273 * 4274 * If you want to reset to the default value, write "r" (reset) to the file to 4275 * reset them 4276 * 4277 * This setting works under auto fan control mode only. It can co-exist with 4278 * other settings which can work also under auto mode. It adjusts the PMFW's 4279 * behavior about the minimum fan speed in PWM the fan should spin. Setting 4280 * via this interface will switch the fan control to auto mode implicitly. 4281 */ 4282 static ssize_t fan_minimum_pwm_show(struct kobject *kobj, 4283 struct kobj_attribute *attr, 4284 char *buf) 4285 { 4286 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4287 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4288 4289 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf); 4290 } 4291 4292 static ssize_t fan_minimum_pwm_store(struct kobject *kobj, 4293 struct kobj_attribute *attr, 4294 const char *buf, 4295 size_t count) 4296 { 4297 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4298 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4299 4300 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4301 PP_OD_EDIT_FAN_MINIMUM_PWM, 4302 buf, 4303 count); 4304 } 4305 4306 static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev) 4307 { 4308 umode_t umode = 0000; 4309 4310 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE) 4311 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4312 4313 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET) 4314 umode |= S_IWUSR; 4315 4316 return umode; 4317 } 4318 4319 /** 4320 * DOC: fan_zero_rpm_enable 4321 * 4322 * The amdgpu driver provides a sysfs API for checking and adjusting the 4323 * zero RPM feature. 4324 * 4325 * Reading back the file shows you the current setting and the permitted 4326 * ranges if changable. 4327 * 4328 * Writing an integer to the file, change the setting accordingly. 4329 * 4330 * When you have finished the editing, write "c" (commit) to the file to commit 4331 * your changes. 4332 * 4333 * If you want to reset to the default value, write "r" (reset) to the file to 4334 * reset them. 4335 */ 4336 static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj, 4337 struct kobj_attribute *attr, 4338 char *buf) 4339 { 4340 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4341 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4342 4343 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf); 4344 } 4345 4346 static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj, 4347 struct kobj_attribute *attr, 4348 const char *buf, 4349 size_t count) 4350 { 4351 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4352 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4353 4354 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4355 PP_OD_EDIT_FAN_ZERO_RPM_ENABLE, 4356 buf, 4357 count); 4358 } 4359 4360 static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev) 4361 { 4362 umode_t umode = 0000; 4363 4364 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE) 4365 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4366 4367 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET) 4368 umode |= S_IWUSR; 4369 4370 return umode; 4371 } 4372 4373 /** 4374 * DOC: fan_zero_rpm_stop_temperature 4375 * 4376 * The amdgpu driver provides a sysfs API for checking and adjusting the 4377 * zero RPM stop temperature feature. 4378 * 4379 * Reading back the file shows you the current setting and the permitted 4380 * ranges if changable. 4381 * 4382 * Writing an integer to the file, change the setting accordingly. 4383 * 4384 * When you have finished the editing, write "c" (commit) to the file to commit 4385 * your changes. 4386 * 4387 * If you want to reset to the default value, write "r" (reset) to the file to 4388 * reset them. 4389 * 4390 * This setting works only if the Zero RPM setting is enabled. It adjusts the 4391 * temperature below which the fan can stop. 4392 */ 4393 static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj, 4394 struct kobj_attribute *attr, 4395 char *buf) 4396 { 4397 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4398 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4399 4400 return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf); 4401 } 4402 4403 static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj, 4404 struct kobj_attribute *attr, 4405 const char *buf, 4406 size_t count) 4407 { 4408 struct od_kobj *container = container_of(kobj, struct od_kobj, kobj); 4409 struct amdgpu_device *adev = (struct amdgpu_device *)container->priv; 4410 4411 return (ssize_t)amdgpu_distribute_custom_od_settings(adev, 4412 PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP, 4413 buf, 4414 count); 4415 } 4416 4417 static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev) 4418 { 4419 umode_t umode = 0000; 4420 4421 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE) 4422 umode |= S_IRUSR | S_IRGRP | S_IROTH; 4423 4424 if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET) 4425 umode |= S_IWUSR; 4426 4427 return umode; 4428 } 4429 4430 static struct od_feature_set amdgpu_od_set = { 4431 .containers = { 4432 [0] = { 4433 .name = "fan_ctrl", 4434 .sub_feature = { 4435 [0] = { 4436 .name = "fan_curve", 4437 .ops = { 4438 .is_visible = fan_curve_visible, 4439 .show = fan_curve_show, 4440 .store = fan_curve_store, 4441 }, 4442 }, 4443 [1] = { 4444 .name = "acoustic_limit_rpm_threshold", 4445 .ops = { 4446 .is_visible = acoustic_limit_threshold_visible, 4447 .show = acoustic_limit_threshold_show, 4448 .store = acoustic_limit_threshold_store, 4449 }, 4450 }, 4451 [2] = { 4452 .name = "acoustic_target_rpm_threshold", 4453 .ops = { 4454 .is_visible = acoustic_target_threshold_visible, 4455 .show = acoustic_target_threshold_show, 4456 .store = acoustic_target_threshold_store, 4457 }, 4458 }, 4459 [3] = { 4460 .name = "fan_target_temperature", 4461 .ops = { 4462 .is_visible = fan_target_temperature_visible, 4463 .show = fan_target_temperature_show, 4464 .store = fan_target_temperature_store, 4465 }, 4466 }, 4467 [4] = { 4468 .name = "fan_minimum_pwm", 4469 .ops = { 4470 .is_visible = fan_minimum_pwm_visible, 4471 .show = fan_minimum_pwm_show, 4472 .store = fan_minimum_pwm_store, 4473 }, 4474 }, 4475 [5] = { 4476 .name = "fan_zero_rpm_enable", 4477 .ops = { 4478 .is_visible = fan_zero_rpm_enable_visible, 4479 .show = fan_zero_rpm_enable_show, 4480 .store = fan_zero_rpm_enable_store, 4481 }, 4482 }, 4483 [6] = { 4484 .name = "fan_zero_rpm_stop_temperature", 4485 .ops = { 4486 .is_visible = fan_zero_rpm_stop_temp_visible, 4487 .show = fan_zero_rpm_stop_temp_show, 4488 .store = fan_zero_rpm_stop_temp_store, 4489 }, 4490 }, 4491 }, 4492 }, 4493 }, 4494 }; 4495 4496 static void od_kobj_release(struct kobject *kobj) 4497 { 4498 struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj); 4499 4500 kfree(od_kobj); 4501 } 4502 4503 static const struct kobj_type od_ktype = { 4504 .release = od_kobj_release, 4505 .sysfs_ops = &kobj_sysfs_ops, 4506 }; 4507 4508 static void amdgpu_od_set_fini(struct amdgpu_device *adev) 4509 { 4510 struct od_kobj *container, *container_next; 4511 struct od_attribute *attribute, *attribute_next; 4512 4513 if (list_empty(&adev->pm.od_kobj_list)) 4514 return; 4515 4516 list_for_each_entry_safe(container, container_next, 4517 &adev->pm.od_kobj_list, entry) { 4518 list_del(&container->entry); 4519 4520 list_for_each_entry_safe(attribute, attribute_next, 4521 &container->attribute, entry) { 4522 list_del(&attribute->entry); 4523 sysfs_remove_file(&container->kobj, 4524 &attribute->attribute.attr); 4525 kfree(attribute); 4526 } 4527 4528 kobject_put(&container->kobj); 4529 } 4530 } 4531 4532 static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev, 4533 struct od_feature_ops *feature_ops) 4534 { 4535 umode_t mode; 4536 4537 if (!feature_ops->is_visible) 4538 return false; 4539 4540 /* 4541 * If the feature has no user read and write mode set, 4542 * we can assume the feature is actually not supported.(?) 4543 * And the revelant sysfs interface should not be exposed. 4544 */ 4545 mode = feature_ops->is_visible(adev); 4546 if (mode & (S_IRUSR | S_IWUSR)) 4547 return true; 4548 4549 return false; 4550 } 4551 4552 static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev, 4553 struct od_feature_container *container) 4554 { 4555 int i; 4556 4557 /* 4558 * If there is no valid entry within the container, the container 4559 * is recognized as a self contained container. And the valid entry 4560 * here means it has a valid naming and it is visible/supported by 4561 * the ASIC. 4562 */ 4563 for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) { 4564 if (container->sub_feature[i].name && 4565 amdgpu_is_od_feature_supported(adev, 4566 &container->sub_feature[i].ops)) 4567 return false; 4568 } 4569 4570 return true; 4571 } 4572 4573 static int amdgpu_od_set_init(struct amdgpu_device *adev) 4574 { 4575 struct od_kobj *top_set, *sub_set; 4576 struct od_attribute *attribute; 4577 struct od_feature_container *container; 4578 struct od_feature_item *feature; 4579 int i, j; 4580 int ret; 4581 4582 /* Setup the top `gpu_od` directory which holds all other OD interfaces */ 4583 top_set = kzalloc_obj(*top_set); 4584 if (!top_set) 4585 return -ENOMEM; 4586 list_add(&top_set->entry, &adev->pm.od_kobj_list); 4587 4588 ret = kobject_init_and_add(&top_set->kobj, 4589 &od_ktype, 4590 &adev->dev->kobj, 4591 "%s", 4592 "gpu_od"); 4593 if (ret) 4594 goto err_out; 4595 INIT_LIST_HEAD(&top_set->attribute); 4596 top_set->priv = adev; 4597 4598 for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) { 4599 container = &amdgpu_od_set.containers[i]; 4600 4601 if (!container->name) 4602 continue; 4603 4604 /* 4605 * If there is valid entries within the container, the container 4606 * will be presented as a sub directory and all its holding entries 4607 * will be presented as plain files under it. 4608 * While if there is no valid entry within the container, the container 4609 * itself will be presented as a plain file under top `gpu_od` directory. 4610 */ 4611 if (amdgpu_od_is_self_contained(adev, container)) { 4612 if (!amdgpu_is_od_feature_supported(adev, 4613 &container->ops)) 4614 continue; 4615 4616 /* 4617 * The container is presented as a plain file under top `gpu_od` 4618 * directory. 4619 */ 4620 attribute = kzalloc_obj(*attribute); 4621 if (!attribute) { 4622 ret = -ENOMEM; 4623 goto err_out; 4624 } 4625 list_add(&attribute->entry, &top_set->attribute); 4626 4627 attribute->attribute.attr.mode = 4628 container->ops.is_visible(adev); 4629 attribute->attribute.attr.name = container->name; 4630 attribute->attribute.show = 4631 container->ops.show; 4632 attribute->attribute.store = 4633 container->ops.store; 4634 ret = sysfs_create_file(&top_set->kobj, 4635 &attribute->attribute.attr); 4636 if (ret) 4637 goto err_out; 4638 } else { 4639 /* The container is presented as a sub directory. */ 4640 sub_set = kzalloc_obj(*sub_set); 4641 if (!sub_set) { 4642 ret = -ENOMEM; 4643 goto err_out; 4644 } 4645 list_add(&sub_set->entry, &adev->pm.od_kobj_list); 4646 4647 ret = kobject_init_and_add(&sub_set->kobj, 4648 &od_ktype, 4649 &top_set->kobj, 4650 "%s", 4651 container->name); 4652 if (ret) 4653 goto err_out; 4654 INIT_LIST_HEAD(&sub_set->attribute); 4655 sub_set->priv = adev; 4656 4657 for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) { 4658 feature = &container->sub_feature[j]; 4659 if (!feature->name) 4660 continue; 4661 4662 if (!amdgpu_is_od_feature_supported(adev, 4663 &feature->ops)) 4664 continue; 4665 4666 /* 4667 * With the container presented as a sub directory, the entry within 4668 * it is presented as a plain file under the sub directory. 4669 */ 4670 attribute = kzalloc_obj(*attribute); 4671 if (!attribute) { 4672 ret = -ENOMEM; 4673 goto err_out; 4674 } 4675 list_add(&attribute->entry, &sub_set->attribute); 4676 4677 attribute->attribute.attr.mode = 4678 feature->ops.is_visible(adev); 4679 attribute->attribute.attr.name = feature->name; 4680 attribute->attribute.show = 4681 feature->ops.show; 4682 attribute->attribute.store = 4683 feature->ops.store; 4684 ret = sysfs_create_file(&sub_set->kobj, 4685 &attribute->attribute.attr); 4686 if (ret) 4687 goto err_out; 4688 } 4689 } 4690 } 4691 4692 /* 4693 * If gpu_od is the only member in the list, that means gpu_od is an 4694 * empty directory, so remove it. 4695 */ 4696 if (list_is_singular(&adev->pm.od_kobj_list)) 4697 goto err_out; 4698 4699 return 0; 4700 4701 err_out: 4702 amdgpu_od_set_fini(adev); 4703 4704 return ret; 4705 } 4706 4707 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 4708 { 4709 enum amdgpu_sriov_vf_mode mode; 4710 uint32_t mask = 0; 4711 uint32_t tmp; 4712 int ret; 4713 4714 if (adev->pm.sysfs_initialized) 4715 return 0; 4716 4717 INIT_LIST_HEAD(&adev->pm.pm_attr_list); 4718 4719 if (adev->pm.dpm_enabled == 0) 4720 return 0; 4721 4722 mode = amdgpu_virt_get_sriov_vf_mode(adev); 4723 4724 /* under multi-vf mode, the hwmon attributes are all not supported */ 4725 if (mode != SRIOV_VF_MODE_MULTI_VF) { 4726 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 4727 DRIVER_NAME, adev, 4728 hwmon_groups); 4729 if (IS_ERR(adev->pm.int_hwmon_dev)) { 4730 ret = PTR_ERR(adev->pm.int_hwmon_dev); 4731 dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret); 4732 return ret; 4733 } 4734 } 4735 4736 switch (mode) { 4737 case SRIOV_VF_MODE_ONE_VF: 4738 mask = ATTR_FLAG_ONEVF; 4739 break; 4740 case SRIOV_VF_MODE_MULTI_VF: 4741 mask = 0; 4742 break; 4743 case SRIOV_VF_MODE_BARE_METAL: 4744 default: 4745 mask = ATTR_FLAG_MASK_ALL; 4746 break; 4747 } 4748 4749 ret = amdgpu_device_attr_create_groups(adev, 4750 amdgpu_device_attrs, 4751 ARRAY_SIZE(amdgpu_device_attrs), 4752 mask, 4753 &adev->pm.pm_attr_list); 4754 if (ret) 4755 goto err_out0; 4756 4757 if (amdgpu_dpm_is_overdrive_supported(adev)) { 4758 ret = amdgpu_od_set_init(adev); 4759 if (ret) 4760 goto err_out1; 4761 } else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) { 4762 dev_info(adev->dev, "overdrive feature is not supported\n"); 4763 } 4764 4765 if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) != 4766 -EOPNOTSUPP) { 4767 ret = devm_device_add_group(adev->dev, 4768 &amdgpu_pm_policy_attr_group); 4769 if (ret) 4770 goto err_out1; 4771 } 4772 4773 if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) { 4774 ret = devm_device_add_group(adev->dev, 4775 &amdgpu_board_attr_group); 4776 if (ret) 4777 goto err_out1; 4778 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, 4779 (void *)&tmp) != -EOPNOTSUPP) { 4780 sysfs_add_file_to_group(&adev->dev->kobj, 4781 &dev_attr_cur_node_power_limit.attr, 4782 amdgpu_board_attr_group.name); 4783 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr, 4784 amdgpu_board_attr_group.name); 4785 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr, 4786 amdgpu_board_attr_group.name); 4787 sysfs_add_file_to_group(&adev->dev->kobj, 4788 &dev_attr_max_node_power_limit.attr, 4789 amdgpu_board_attr_group.name); 4790 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr, 4791 amdgpu_board_attr_group.name); 4792 } 4793 if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_UBB_POWER_LIMIT, 4794 (void *)&tmp) != -EOPNOTSUPP) { 4795 sysfs_add_file_to_group(&adev->dev->kobj, 4796 &dev_attr_baseboard_power_limit.attr, 4797 amdgpu_board_attr_group.name); 4798 sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_baseboard_power.attr, 4799 amdgpu_board_attr_group.name); 4800 } 4801 } 4802 4803 adev->pm.sysfs_initialized = true; 4804 4805 return 0; 4806 4807 err_out1: 4808 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list); 4809 err_out0: 4810 if (adev->pm.int_hwmon_dev) 4811 hwmon_device_unregister(adev->pm.int_hwmon_dev); 4812 4813 return ret; 4814 } 4815 4816 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 4817 { 4818 amdgpu_od_set_fini(adev); 4819 4820 if (adev->pm.int_hwmon_dev) 4821 hwmon_device_unregister(adev->pm.int_hwmon_dev); 4822 4823 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list); 4824 } 4825 4826 /* 4827 * Debugfs info 4828 */ 4829 #if defined(CONFIG_DEBUG_FS) 4830 4831 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m, 4832 struct amdgpu_device *adev) 4833 { 4834 uint16_t *p_val; 4835 uint32_t size; 4836 int i; 4837 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev); 4838 4839 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) { 4840 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t), 4841 GFP_KERNEL); 4842 4843 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK, 4844 (void *)p_val, &size)) { 4845 for (i = 0; i < num_cpu_cores; i++) 4846 seq_printf(m, "\t%u MHz (CPU%d)\n", 4847 *(p_val + i), i); 4848 } 4849 4850 kfree(p_val); 4851 } 4852 } 4853 4854 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 4855 { 4856 uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); 4857 uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); 4858 uint32_t value; 4859 uint64_t value64 = 0; 4860 uint32_t query = 0; 4861 int size; 4862 4863 /* GPU Clocks */ 4864 size = sizeof(value); 4865 seq_printf(m, "GFX Clocks and Power:\n"); 4866 4867 amdgpu_debugfs_prints_cpu_info(m, adev); 4868 4869 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 4870 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 4871 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 4872 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 4873 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 4874 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 4875 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 4876 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 4877 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 4878 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 4879 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 4880 seq_printf(m, "\t%u mV (VDDNB)\n", value); 4881 size = sizeof(uint32_t); 4882 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) { 4883 if (adev->flags & AMD_IS_APU) 4884 seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff); 4885 else 4886 seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff); 4887 } 4888 size = sizeof(uint32_t); 4889 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) { 4890 if (adev->flags & AMD_IS_APU) 4891 seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff); 4892 else 4893 seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff); 4894 } 4895 size = sizeof(value); 4896 seq_printf(m, "\n"); 4897 4898 /* GPU Temp */ 4899 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 4900 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 4901 4902 /* GPU Load */ 4903 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 4904 seq_printf(m, "GPU Load: %u %%\n", value); 4905 /* MEM Load */ 4906 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) 4907 seq_printf(m, "MEM Load: %u %%\n", value); 4908 /* VCN Load */ 4909 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size)) 4910 seq_printf(m, "VCN Load: %u %%\n", value); 4911 4912 seq_printf(m, "\n"); 4913 4914 /* SMC feature mask */ 4915 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 4916 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 4917 4918 /* ASICs greater than CHIP_VEGA20 supports these sensors */ 4919 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) { 4920 /* VCN clocks */ 4921 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { 4922 if (!value) { 4923 seq_printf(m, "VCN: Powered down\n"); 4924 } else { 4925 seq_printf(m, "VCN: Powered up\n"); 4926 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 4927 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 4928 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 4929 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 4930 } 4931 } 4932 seq_printf(m, "\n"); 4933 } else { 4934 /* UVD clocks */ 4935 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 4936 if (!value) { 4937 seq_printf(m, "UVD: Powered down\n"); 4938 } else { 4939 seq_printf(m, "UVD: Powered up\n"); 4940 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 4941 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 4942 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 4943 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 4944 } 4945 } 4946 seq_printf(m, "\n"); 4947 4948 /* VCE clocks */ 4949 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 4950 if (!value) { 4951 seq_printf(m, "VCE: Powered down\n"); 4952 } else { 4953 seq_printf(m, "VCE: Powered up\n"); 4954 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 4955 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 4956 } 4957 } 4958 } 4959 4960 return 0; 4961 } 4962 4963 static const struct cg_flag_name clocks[] = { 4964 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, 4965 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 4966 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 4967 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 4968 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 4969 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 4970 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 4971 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 4972 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 4973 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 4974 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 4975 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 4976 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 4977 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 4978 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 4979 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 4980 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 4981 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 4982 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 4983 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 4984 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 4985 {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 4986 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 4987 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 4988 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 4989 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"}, 4990 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"}, 4991 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"}, 4992 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"}, 4993 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"}, 4994 {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"}, 4995 {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"}, 4996 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, 4997 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, 4998 {0, NULL}, 4999 }; 5000 5001 static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags) 5002 { 5003 int i; 5004 5005 for (i = 0; clocks[i].flag; i++) 5006 seq_printf(m, "\t%s: %s\n", clocks[i].name, 5007 (flags & clocks[i].flag) ? "On" : "Off"); 5008 } 5009 5010 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) 5011 { 5012 struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 5013 u64 flags = 0; 5014 int r; 5015 5016 r = amdgpu_pm_get_access(adev); 5017 if (r < 0) 5018 return r; 5019 5020 if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) { 5021 r = amdgpu_debugfs_pm_info_pp(m, adev); 5022 if (r) 5023 goto out; 5024 } 5025 5026 amdgpu_device_ip_get_clockgating_state(adev, &flags); 5027 5028 seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags); 5029 amdgpu_parse_cg_state(m, flags); 5030 seq_printf(m, "\n"); 5031 5032 out: 5033 amdgpu_pm_put_access(adev); 5034 5035 return r; 5036 } 5037 5038 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info); 5039 5040 /* 5041 * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW 5042 * 5043 * Reads debug memory region allocated to PMFW 5044 */ 5045 static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, 5046 size_t size, loff_t *pos) 5047 { 5048 struct amdgpu_device *adev = file_inode(f)->i_private; 5049 size_t smu_prv_buf_size; 5050 void *smu_prv_buf; 5051 int ret = 0; 5052 5053 ret = amdgpu_pm_dev_state_check(adev, true); 5054 if (ret) 5055 return ret; 5056 5057 ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size); 5058 if (ret) 5059 return ret; 5060 5061 if (!smu_prv_buf || !smu_prv_buf_size) 5062 return -EINVAL; 5063 5064 return simple_read_from_buffer(buf, size, pos, smu_prv_buf, 5065 smu_prv_buf_size); 5066 } 5067 5068 static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = { 5069 .owner = THIS_MODULE, 5070 .open = simple_open, 5071 .read = amdgpu_pm_prv_buffer_read, 5072 .llseek = default_llseek, 5073 }; 5074 5075 #endif 5076 5077 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 5078 { 5079 #if defined(CONFIG_DEBUG_FS) 5080 struct drm_minor *minor = adev_to_drm(adev)->primary; 5081 struct dentry *root = minor->debugfs_root; 5082 5083 if (!adev->pm.dpm_enabled) 5084 return; 5085 5086 debugfs_create_file("amdgpu_pm_info", 0444, root, adev, 5087 &amdgpu_debugfs_pm_info_fops); 5088 5089 if (adev->pm.smu_prv_buffer_size > 0) 5090 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root, 5091 adev, 5092 &amdgpu_debugfs_pm_prv_buffer_fops, 5093 adev->pm.smu_prv_buffer_size); 5094 5095 amdgpu_dpm_stb_debug_fs_init(adev); 5096 #endif 5097 } 5098