1 /* 2 * Permission is hereby granted, free of charge, to any person obtaining a 3 * copy of this software and associated documentation files (the "Software"), 4 * to deal in the Software without restriction, including without limitation 5 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 6 * and/or sell copies of the Software, and to permit persons to whom the 7 * Software is furnished to do so, subject to the following conditions: 8 * 9 * The above copyright notice and this permission notice shall be included in 10 * all copies or substantial portions of the Software. 11 * 12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 18 * OTHER DEALINGS IN THE SOFTWARE. 19 * 20 * Authors: Rafał Miłecki <zajec5@gmail.com> 21 * Alex Deucher <alexdeucher@gmail.com> 22 */ 23 24 #include <linux/debugfs.h> 25 #include <linux/hwmon-sysfs.h> 26 #include <linux/hwmon.h> 27 #include <linux/pci.h> 28 #include <linux/power_supply.h> 29 30 #include <drm/drm_vblank.h> 31 32 #include "atom.h" 33 #include "avivod.h" 34 #include "r600_dpm.h" 35 #include "radeon.h" 36 #include "radeon_pm.h" 37 38 #define RADEON_IDLE_LOOP_MS 100 39 #define RADEON_RECLOCK_DELAY_MS 200 40 #define RADEON_WAIT_VBLANK_TIMEOUT 200 41 42 static const char *radeon_pm_state_type_name[5] = { 43 "", 44 "Powersave", 45 "Battery", 46 "Balanced", 47 "Performance", 48 }; 49 50 static void radeon_dynpm_idle_work_handler(struct work_struct *work); 51 static void radeon_debugfs_pm_init(struct radeon_device *rdev); 52 static bool radeon_pm_in_vbl(struct radeon_device *rdev); 53 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish); 54 static void radeon_pm_update_profile(struct radeon_device *rdev); 55 static void radeon_pm_set_clocks(struct radeon_device *rdev); 56 57 int radeon_pm_get_type_index(struct radeon_device *rdev, 58 enum radeon_pm_state_type ps_type, 59 int instance) 60 { 61 int i; 62 int found_instance = -1; 63 64 for (i = 0; i < rdev->pm.num_power_states; i++) { 65 if (rdev->pm.power_state[i].type == ps_type) { 66 found_instance++; 67 if (found_instance == instance) 68 return i; 69 } 70 } 71 /* return default if no match */ 72 return rdev->pm.default_power_state_index; 73 } 74 75 void radeon_pm_acpi_event_handler(struct radeon_device *rdev) 76 { 77 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { 78 mutex_lock(&rdev->pm.mutex); 79 if (power_supply_is_system_supplied() > 0) 80 rdev->pm.dpm.ac_power = true; 81 else 82 rdev->pm.dpm.ac_power = false; 83 if (rdev->family == CHIP_ARUBA) { 84 if (rdev->asic->dpm.enable_bapm) 85 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); 86 } 87 mutex_unlock(&rdev->pm.mutex); 88 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 89 if (rdev->pm.profile == PM_PROFILE_AUTO) { 90 mutex_lock(&rdev->pm.mutex); 91 radeon_pm_update_profile(rdev); 92 radeon_pm_set_clocks(rdev); 93 mutex_unlock(&rdev->pm.mutex); 94 } 95 } 96 } 97 98 static void radeon_pm_update_profile(struct radeon_device *rdev) 99 { 100 switch (rdev->pm.profile) { 101 case PM_PROFILE_DEFAULT: 102 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX; 103 break; 104 case PM_PROFILE_AUTO: 105 if (power_supply_is_system_supplied() > 0) { 106 if (rdev->pm.active_crtc_count > 1) 107 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; 108 else 109 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; 110 } else { 111 if (rdev->pm.active_crtc_count > 1) 112 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; 113 else 114 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; 115 } 116 break; 117 case PM_PROFILE_LOW: 118 if (rdev->pm.active_crtc_count > 1) 119 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX; 120 else 121 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX; 122 break; 123 case PM_PROFILE_MID: 124 if (rdev->pm.active_crtc_count > 1) 125 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX; 126 else 127 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX; 128 break; 129 case PM_PROFILE_HIGH: 130 if (rdev->pm.active_crtc_count > 1) 131 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX; 132 else 133 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX; 134 break; 135 } 136 137 if (rdev->pm.active_crtc_count == 0) { 138 rdev->pm.requested_power_state_index = 139 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx; 140 rdev->pm.requested_clock_mode_index = 141 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx; 142 } else { 143 rdev->pm.requested_power_state_index = 144 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx; 145 rdev->pm.requested_clock_mode_index = 146 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx; 147 } 148 } 149 150 static void radeon_unmap_vram_bos(struct radeon_device *rdev) 151 { 152 struct radeon_bo *bo, *n; 153 154 if (list_empty(&rdev->gem.objects)) 155 return; 156 157 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { 158 if (bo->tbo.resource->mem_type == TTM_PL_VRAM) 159 ttm_bo_unmap_virtual(&bo->tbo); 160 } 161 } 162 163 static void radeon_sync_with_vblank(struct radeon_device *rdev) 164 { 165 if (rdev->pm.active_crtcs) { 166 rdev->pm.vblank_sync = false; 167 wait_event_timeout( 168 rdev->irq.vblank_queue, rdev->pm.vblank_sync, 169 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); 170 } 171 } 172 173 static void radeon_set_power_state(struct radeon_device *rdev) 174 { 175 u32 sclk, mclk; 176 bool misc_after = false; 177 178 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 179 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 180 return; 181 182 if (radeon_gui_idle(rdev)) { 183 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 184 clock_info[rdev->pm.requested_clock_mode_index].sclk; 185 if (sclk > rdev->pm.default_sclk) 186 sclk = rdev->pm.default_sclk; 187 188 /* starting with BTC, there is one state that is used for both 189 * MH and SH. Difference is that we always use the high clock index for 190 * mclk and vddci. 191 */ 192 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && 193 (rdev->family >= CHIP_BARTS) && 194 rdev->pm.active_crtc_count && 195 ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) || 196 (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX))) 197 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 198 clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk; 199 else 200 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index]. 201 clock_info[rdev->pm.requested_clock_mode_index].mclk; 202 203 if (mclk > rdev->pm.default_mclk) 204 mclk = rdev->pm.default_mclk; 205 206 /* upvolt before raising clocks, downvolt after lowering clocks */ 207 if (sclk < rdev->pm.current_sclk) 208 misc_after = true; 209 210 radeon_sync_with_vblank(rdev); 211 212 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 213 if (!radeon_pm_in_vbl(rdev)) 214 return; 215 } 216 217 radeon_pm_prepare(rdev); 218 219 if (!misc_after) 220 /* voltage, pcie lanes, etc.*/ 221 radeon_pm_misc(rdev); 222 223 /* set engine clock */ 224 if (sclk != rdev->pm.current_sclk) { 225 radeon_pm_debug_check_in_vbl(rdev, false); 226 radeon_set_engine_clock(rdev, sclk); 227 radeon_pm_debug_check_in_vbl(rdev, true); 228 rdev->pm.current_sclk = sclk; 229 DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk); 230 } 231 232 /* set memory clock */ 233 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) { 234 radeon_pm_debug_check_in_vbl(rdev, false); 235 radeon_set_memory_clock(rdev, mclk); 236 radeon_pm_debug_check_in_vbl(rdev, true); 237 rdev->pm.current_mclk = mclk; 238 DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk); 239 } 240 241 if (misc_after) 242 /* voltage, pcie lanes, etc.*/ 243 radeon_pm_misc(rdev); 244 245 radeon_pm_finish(rdev); 246 247 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; 248 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; 249 } else 250 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n"); 251 } 252 253 static void radeon_pm_set_clocks(struct radeon_device *rdev) 254 { 255 struct drm_crtc *crtc; 256 int i, r; 257 258 /* no need to take locks, etc. if nothing's going to change */ 259 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 260 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) 261 return; 262 263 down_write(&rdev->pm.mclk_lock); 264 mutex_lock(&rdev->ring_lock); 265 266 /* wait for the rings to drain */ 267 for (i = 0; i < RADEON_NUM_RINGS; i++) { 268 struct radeon_ring *ring = &rdev->ring[i]; 269 if (!ring->ready) { 270 continue; 271 } 272 r = radeon_fence_wait_empty(rdev, i); 273 if (r) { 274 /* needs a GPU reset dont reset here */ 275 mutex_unlock(&rdev->ring_lock); 276 up_write(&rdev->pm.mclk_lock); 277 return; 278 } 279 } 280 281 radeon_unmap_vram_bos(rdev); 282 283 if (rdev->irq.installed) { 284 i = 0; 285 drm_for_each_crtc(crtc, rdev_to_drm(rdev)) { 286 if (rdev->pm.active_crtcs & (1 << i)) { 287 /* This can fail if a modeset is in progress */ 288 if (drm_crtc_vblank_get(crtc) == 0) 289 rdev->pm.req_vblank |= (1 << i); 290 else 291 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", 292 i); 293 } 294 i++; 295 } 296 } 297 298 radeon_set_power_state(rdev); 299 300 if (rdev->irq.installed) { 301 i = 0; 302 drm_for_each_crtc(crtc, rdev_to_drm(rdev)) { 303 if (rdev->pm.req_vblank & (1 << i)) { 304 rdev->pm.req_vblank &= ~(1 << i); 305 drm_crtc_vblank_put(crtc); 306 } 307 i++; 308 } 309 } 310 311 /* update display watermarks based on new power state */ 312 radeon_update_bandwidth_info(rdev); 313 if (rdev->pm.active_crtc_count) 314 radeon_bandwidth_update(rdev); 315 316 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 317 318 mutex_unlock(&rdev->ring_lock); 319 up_write(&rdev->pm.mclk_lock); 320 } 321 322 static void radeon_pm_print_states(struct radeon_device *rdev) 323 { 324 int i, j; 325 struct radeon_power_state *power_state; 326 struct radeon_pm_clock_info *clock_info; 327 328 DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states); 329 for (i = 0; i < rdev->pm.num_power_states; i++) { 330 power_state = &rdev->pm.power_state[i]; 331 DRM_DEBUG_DRIVER("State %d: %s\n", i, 332 radeon_pm_state_type_name[power_state->type]); 333 if (i == rdev->pm.default_power_state_index) 334 DRM_DEBUG_DRIVER("\tDefault"); 335 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) 336 DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes); 337 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) 338 DRM_DEBUG_DRIVER("\tSingle display only\n"); 339 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes); 340 for (j = 0; j < power_state->num_clock_modes; j++) { 341 clock_info = &(power_state->clock_info[j]); 342 if (rdev->flags & RADEON_IS_IGP) 343 DRM_DEBUG_DRIVER("\t\t%d e: %d\n", 344 j, 345 clock_info->sclk * 10); 346 else 347 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n", 348 j, 349 clock_info->sclk * 10, 350 clock_info->mclk * 10, 351 clock_info->voltage.voltage); 352 } 353 } 354 } 355 356 static ssize_t radeon_get_pm_profile(struct device *dev, 357 struct device_attribute *attr, 358 char *buf) 359 { 360 struct drm_device *ddev = dev_get_drvdata(dev); 361 struct radeon_device *rdev = ddev->dev_private; 362 int cp = rdev->pm.profile; 363 364 return sysfs_emit(buf, "%s\n", (cp == PM_PROFILE_AUTO) ? "auto" : 365 (cp == PM_PROFILE_LOW) ? "low" : 366 (cp == PM_PROFILE_MID) ? "mid" : 367 (cp == PM_PROFILE_HIGH) ? "high" : "default"); 368 } 369 370 static ssize_t radeon_set_pm_profile(struct device *dev, 371 struct device_attribute *attr, 372 const char *buf, 373 size_t count) 374 { 375 struct drm_device *ddev = dev_get_drvdata(dev); 376 struct radeon_device *rdev = ddev->dev_private; 377 378 /* Can't set profile when the card is off */ 379 if ((rdev->flags & RADEON_IS_PX) && 380 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 381 return -EINVAL; 382 383 mutex_lock(&rdev->pm.mutex); 384 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 385 if (strncmp("default", buf, strlen("default")) == 0) 386 rdev->pm.profile = PM_PROFILE_DEFAULT; 387 else if (strncmp("auto", buf, strlen("auto")) == 0) 388 rdev->pm.profile = PM_PROFILE_AUTO; 389 else if (strncmp("low", buf, strlen("low")) == 0) 390 rdev->pm.profile = PM_PROFILE_LOW; 391 else if (strncmp("mid", buf, strlen("mid")) == 0) 392 rdev->pm.profile = PM_PROFILE_MID; 393 else if (strncmp("high", buf, strlen("high")) == 0) 394 rdev->pm.profile = PM_PROFILE_HIGH; 395 else { 396 count = -EINVAL; 397 goto fail; 398 } 399 radeon_pm_update_profile(rdev); 400 radeon_pm_set_clocks(rdev); 401 } else 402 count = -EINVAL; 403 404 fail: 405 mutex_unlock(&rdev->pm.mutex); 406 407 return count; 408 } 409 410 static ssize_t radeon_get_pm_method(struct device *dev, 411 struct device_attribute *attr, 412 char *buf) 413 { 414 struct drm_device *ddev = dev_get_drvdata(dev); 415 struct radeon_device *rdev = ddev->dev_private; 416 int pm = rdev->pm.pm_method; 417 418 return sysfs_emit(buf, "%s\n", (pm == PM_METHOD_DYNPM) ? "dynpm" : 419 (pm == PM_METHOD_PROFILE) ? "profile" : "dpm"); 420 } 421 422 static ssize_t radeon_set_pm_method(struct device *dev, 423 struct device_attribute *attr, 424 const char *buf, 425 size_t count) 426 { 427 struct drm_device *ddev = dev_get_drvdata(dev); 428 struct radeon_device *rdev = ddev->dev_private; 429 430 /* Can't set method when the card is off */ 431 if ((rdev->flags & RADEON_IS_PX) && 432 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 433 count = -EINVAL; 434 goto fail; 435 } 436 437 /* we don't support the legacy modes with dpm */ 438 if (rdev->pm.pm_method == PM_METHOD_DPM) { 439 count = -EINVAL; 440 goto fail; 441 } 442 443 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) { 444 mutex_lock(&rdev->pm.mutex); 445 rdev->pm.pm_method = PM_METHOD_DYNPM; 446 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 447 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 448 mutex_unlock(&rdev->pm.mutex); 449 } else if (strncmp("profile", buf, strlen("profile")) == 0) { 450 mutex_lock(&rdev->pm.mutex); 451 /* disable dynpm */ 452 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 453 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 454 rdev->pm.pm_method = PM_METHOD_PROFILE; 455 mutex_unlock(&rdev->pm.mutex); 456 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 457 } else { 458 count = -EINVAL; 459 goto fail; 460 } 461 radeon_pm_compute_clocks(rdev); 462 fail: 463 return count; 464 } 465 466 static ssize_t radeon_get_dpm_state(struct device *dev, 467 struct device_attribute *attr, 468 char *buf) 469 { 470 struct drm_device *ddev = dev_get_drvdata(dev); 471 struct radeon_device *rdev = ddev->dev_private; 472 enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; 473 474 return sysfs_emit(buf, "%s\n", 475 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 476 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 477 } 478 479 static ssize_t radeon_set_dpm_state(struct device *dev, 480 struct device_attribute *attr, 481 const char *buf, 482 size_t count) 483 { 484 struct drm_device *ddev = dev_get_drvdata(dev); 485 struct radeon_device *rdev = ddev->dev_private; 486 487 mutex_lock(&rdev->pm.mutex); 488 if (strncmp("battery", buf, strlen("battery")) == 0) 489 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; 490 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 491 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 492 else if (strncmp("performance", buf, strlen("performance")) == 0) 493 rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; 494 else { 495 mutex_unlock(&rdev->pm.mutex); 496 count = -EINVAL; 497 goto fail; 498 } 499 mutex_unlock(&rdev->pm.mutex); 500 501 /* Can't set dpm state when the card is off */ 502 if (!(rdev->flags & RADEON_IS_PX) || 503 (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) 504 radeon_pm_compute_clocks(rdev); 505 506 fail: 507 return count; 508 } 509 510 static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, 511 struct device_attribute *attr, 512 char *buf) 513 { 514 struct drm_device *ddev = dev_get_drvdata(dev); 515 struct radeon_device *rdev = ddev->dev_private; 516 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 517 518 if ((rdev->flags & RADEON_IS_PX) && 519 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 520 return sysfs_emit(buf, "off\n"); 521 522 return sysfs_emit(buf, "%s\n", 523 (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : 524 (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); 525 } 526 527 static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, 528 struct device_attribute *attr, 529 const char *buf, 530 size_t count) 531 { 532 struct drm_device *ddev = dev_get_drvdata(dev); 533 struct radeon_device *rdev = ddev->dev_private; 534 enum radeon_dpm_forced_level level; 535 int ret = 0; 536 537 /* Can't force performance level when the card is off */ 538 if ((rdev->flags & RADEON_IS_PX) && 539 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 540 return -EINVAL; 541 542 mutex_lock(&rdev->pm.mutex); 543 if (strncmp("low", buf, strlen("low")) == 0) { 544 level = RADEON_DPM_FORCED_LEVEL_LOW; 545 } else if (strncmp("high", buf, strlen("high")) == 0) { 546 level = RADEON_DPM_FORCED_LEVEL_HIGH; 547 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 548 level = RADEON_DPM_FORCED_LEVEL_AUTO; 549 } else { 550 count = -EINVAL; 551 goto fail; 552 } 553 if (rdev->asic->dpm.force_performance_level) { 554 if (rdev->pm.dpm.thermal_active) { 555 count = -EINVAL; 556 goto fail; 557 } 558 ret = radeon_dpm_force_performance_level(rdev, level); 559 if (ret) 560 count = -EINVAL; 561 } 562 fail: 563 mutex_unlock(&rdev->pm.mutex); 564 565 return count; 566 } 567 568 static ssize_t radeon_hwmon_get_pwm1_enable(struct device *dev, 569 struct device_attribute *attr, 570 char *buf) 571 { 572 struct radeon_device *rdev = dev_get_drvdata(dev); 573 u32 pwm_mode = 0; 574 575 if (rdev->asic->dpm.fan_ctrl_get_mode) 576 pwm_mode = rdev->asic->dpm.fan_ctrl_get_mode(rdev); 577 578 /* never 0 (full-speed), fuse or smc-controlled always */ 579 return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2); 580 } 581 582 static ssize_t radeon_hwmon_set_pwm1_enable(struct device *dev, 583 struct device_attribute *attr, 584 const char *buf, 585 size_t count) 586 { 587 struct radeon_device *rdev = dev_get_drvdata(dev); 588 int err; 589 int value; 590 591 if (!rdev->asic->dpm.fan_ctrl_set_mode) 592 return -EINVAL; 593 594 err = kstrtoint(buf, 10, &value); 595 if (err) 596 return err; 597 598 switch (value) { 599 case 1: /* manual, percent-based */ 600 rdev->asic->dpm.fan_ctrl_set_mode(rdev, FDO_PWM_MODE_STATIC); 601 break; 602 default: /* disable */ 603 rdev->asic->dpm.fan_ctrl_set_mode(rdev, 0); 604 break; 605 } 606 607 return count; 608 } 609 610 static ssize_t radeon_hwmon_get_pwm1_min(struct device *dev, 611 struct device_attribute *attr, 612 char *buf) 613 { 614 return sprintf(buf, "%i\n", 0); 615 } 616 617 static ssize_t radeon_hwmon_get_pwm1_max(struct device *dev, 618 struct device_attribute *attr, 619 char *buf) 620 { 621 return sprintf(buf, "%i\n", 255); 622 } 623 624 static ssize_t radeon_hwmon_set_pwm1(struct device *dev, 625 struct device_attribute *attr, 626 const char *buf, size_t count) 627 { 628 struct radeon_device *rdev = dev_get_drvdata(dev); 629 int err; 630 u32 value; 631 632 err = kstrtou32(buf, 10, &value); 633 if (err) 634 return err; 635 636 value = (value * 100) / 255; 637 638 err = rdev->asic->dpm.set_fan_speed_percent(rdev, value); 639 if (err) 640 return err; 641 642 return count; 643 } 644 645 static ssize_t radeon_hwmon_get_pwm1(struct device *dev, 646 struct device_attribute *attr, 647 char *buf) 648 { 649 struct radeon_device *rdev = dev_get_drvdata(dev); 650 int err; 651 u32 speed; 652 653 err = rdev->asic->dpm.get_fan_speed_percent(rdev, &speed); 654 if (err) 655 return err; 656 657 speed = (speed * 255) / 100; 658 659 return sprintf(buf, "%i\n", speed); 660 } 661 662 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); 663 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); 664 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state); 665 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 666 radeon_get_dpm_forced_performance_level, 667 radeon_set_dpm_forced_performance_level); 668 669 static ssize_t radeon_hwmon_show_temp(struct device *dev, 670 struct device_attribute *attr, 671 char *buf) 672 { 673 struct radeon_device *rdev = dev_get_drvdata(dev); 674 struct drm_device *ddev = rdev_to_drm(rdev); 675 int temp; 676 677 /* Can't get temperature when the card is off */ 678 if ((rdev->flags & RADEON_IS_PX) && 679 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 680 return -EINVAL; 681 682 if (rdev->asic->pm.get_temperature) 683 temp = radeon_get_temperature(rdev); 684 else 685 temp = 0; 686 687 return sysfs_emit(buf, "%d\n", temp); 688 } 689 690 static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev, 691 struct device_attribute *attr, 692 char *buf) 693 { 694 struct radeon_device *rdev = dev_get_drvdata(dev); 695 int hyst = to_sensor_dev_attr(attr)->index; 696 int temp; 697 698 if (hyst) 699 temp = rdev->pm.dpm.thermal.min_temp; 700 else 701 temp = rdev->pm.dpm.thermal.max_temp; 702 703 return sysfs_emit(buf, "%d\n", temp); 704 } 705 706 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); 707 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 0); 708 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, radeon_hwmon_show_temp_thresh, NULL, 1); 709 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1, radeon_hwmon_set_pwm1, 0); 710 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, radeon_hwmon_get_pwm1_enable, radeon_hwmon_set_pwm1_enable, 0); 711 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, radeon_hwmon_get_pwm1_min, NULL, 0); 712 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, radeon_hwmon_get_pwm1_max, NULL, 0); 713 714 static ssize_t radeon_hwmon_show_sclk(struct device *dev, 715 struct device_attribute *attr, char *buf) 716 { 717 struct radeon_device *rdev = dev_get_drvdata(dev); 718 struct drm_device *ddev = rdev_to_drm(rdev); 719 u32 sclk = 0; 720 721 /* Can't get clock frequency when the card is off */ 722 if ((rdev->flags & RADEON_IS_PX) && 723 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 724 return -EINVAL; 725 726 if (rdev->asic->dpm.get_current_sclk) 727 sclk = radeon_dpm_get_current_sclk(rdev); 728 729 /* Value returned by dpm is in 10 KHz units, need to convert it into Hz 730 for hwmon */ 731 sclk *= 10000; 732 733 return sysfs_emit(buf, "%u\n", sclk); 734 } 735 736 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, radeon_hwmon_show_sclk, NULL, 737 0); 738 739 static ssize_t radeon_hwmon_show_vddc(struct device *dev, 740 struct device_attribute *attr, char *buf) 741 { 742 struct radeon_device *rdev = dev_get_drvdata(dev); 743 struct drm_device *ddev = rdev_to_drm(rdev); 744 u16 vddc = 0; 745 746 /* Can't get vddc when the card is off */ 747 if ((rdev->flags & RADEON_IS_PX) && 748 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) 749 return -EINVAL; 750 751 if (rdev->asic->dpm.get_current_vddc) 752 vddc = rdev->asic->dpm.get_current_vddc(rdev); 753 754 return sysfs_emit(buf, "%u\n", vddc); 755 } 756 757 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, radeon_hwmon_show_vddc, NULL, 758 0); 759 760 static struct attribute *hwmon_attributes[] = { 761 &sensor_dev_attr_temp1_input.dev_attr.attr, 762 &sensor_dev_attr_temp1_crit.dev_attr.attr, 763 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 764 &sensor_dev_attr_pwm1.dev_attr.attr, 765 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 766 &sensor_dev_attr_pwm1_min.dev_attr.attr, 767 &sensor_dev_attr_pwm1_max.dev_attr.attr, 768 &sensor_dev_attr_freq1_input.dev_attr.attr, 769 &sensor_dev_attr_in0_input.dev_attr.attr, 770 NULL 771 }; 772 773 static umode_t hwmon_attributes_visible(struct kobject *kobj, 774 struct attribute *attr, int index) 775 { 776 struct device *dev = kobj_to_dev(kobj); 777 struct radeon_device *rdev = dev_get_drvdata(dev); 778 umode_t effective_mode = attr->mode; 779 780 /* Skip attributes if DPM is not enabled */ 781 if (rdev->pm.pm_method != PM_METHOD_DPM && 782 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 783 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 784 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 785 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 786 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 787 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 788 attr == &sensor_dev_attr_freq1_input.dev_attr.attr || 789 attr == &sensor_dev_attr_in0_input.dev_attr.attr)) 790 return 0; 791 792 /* Skip vddc attribute if get_current_vddc is not implemented */ 793 if (attr == &sensor_dev_attr_in0_input.dev_attr.attr && 794 !rdev->asic->dpm.get_current_vddc) 795 return 0; 796 797 /* Skip fan attributes if fan is not present */ 798 if (rdev->pm.no_fan && 799 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 800 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 801 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 802 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 803 return 0; 804 805 /* mask fan attributes if we have no bindings for this asic to expose */ 806 if ((!rdev->asic->dpm.get_fan_speed_percent && 807 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 808 (!rdev->asic->dpm.fan_ctrl_get_mode && 809 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 810 effective_mode &= ~S_IRUGO; 811 812 if ((!rdev->asic->dpm.set_fan_speed_percent && 813 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 814 (!rdev->asic->dpm.fan_ctrl_set_mode && 815 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 816 effective_mode &= ~S_IWUSR; 817 818 /* hide max/min values if we can't both query and manage the fan */ 819 if ((!rdev->asic->dpm.set_fan_speed_percent && 820 !rdev->asic->dpm.get_fan_speed_percent) && 821 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 822 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 823 return 0; 824 825 return effective_mode; 826 } 827 828 static const struct attribute_group hwmon_attrgroup = { 829 .attrs = hwmon_attributes, 830 .is_visible = hwmon_attributes_visible, 831 }; 832 833 static const struct attribute_group *hwmon_groups[] = { 834 &hwmon_attrgroup, 835 NULL 836 }; 837 838 static int radeon_hwmon_init(struct radeon_device *rdev) 839 { 840 int err = 0; 841 842 switch (rdev->pm.int_thermal_type) { 843 case THERMAL_TYPE_RV6XX: 844 case THERMAL_TYPE_RV770: 845 case THERMAL_TYPE_EVERGREEN: 846 case THERMAL_TYPE_NI: 847 case THERMAL_TYPE_SUMO: 848 case THERMAL_TYPE_SI: 849 case THERMAL_TYPE_CI: 850 case THERMAL_TYPE_KV: 851 if (rdev->asic->pm.get_temperature == NULL) 852 return err; 853 rdev->pm.int_hwmon_dev = hwmon_device_register_with_groups(rdev->dev, 854 "radeon", rdev, 855 hwmon_groups); 856 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 857 err = PTR_ERR(rdev->pm.int_hwmon_dev); 858 dev_err(rdev->dev, 859 "Unable to register hwmon device: %d\n", err); 860 } 861 break; 862 default: 863 break; 864 } 865 866 return err; 867 } 868 869 static void radeon_hwmon_fini(struct radeon_device *rdev) 870 { 871 if (rdev->pm.int_hwmon_dev) 872 hwmon_device_unregister(rdev->pm.int_hwmon_dev); 873 } 874 875 static void radeon_dpm_thermal_work_handler(struct work_struct *work) 876 { 877 struct radeon_device *rdev = 878 container_of(work, struct radeon_device, 879 pm.dpm.thermal.work); 880 /* switch to the thermal state */ 881 enum radeon_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 882 883 if (!rdev->pm.dpm_enabled) 884 return; 885 886 if (rdev->asic->pm.get_temperature) { 887 int temp = radeon_get_temperature(rdev); 888 889 if (temp < rdev->pm.dpm.thermal.min_temp) 890 /* switch back the user state */ 891 dpm_state = rdev->pm.dpm.user_state; 892 } else { 893 if (rdev->pm.dpm.thermal.high_to_low) 894 /* switch back the user state */ 895 dpm_state = rdev->pm.dpm.user_state; 896 } 897 mutex_lock(&rdev->pm.mutex); 898 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 899 rdev->pm.dpm.thermal_active = true; 900 else 901 rdev->pm.dpm.thermal_active = false; 902 rdev->pm.dpm.state = dpm_state; 903 mutex_unlock(&rdev->pm.mutex); 904 905 radeon_pm_compute_clocks(rdev); 906 } 907 908 static bool radeon_dpm_single_display(struct radeon_device *rdev) 909 { 910 bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ? 911 true : false; 912 913 /* check if the vblank period is too short to adjust the mclk */ 914 if (single_display && rdev->asic->dpm.vblank_too_short) { 915 if (radeon_dpm_vblank_too_short(rdev)) 916 single_display = false; 917 } 918 919 /* 120hz tends to be problematic even if they are under the 920 * vblank limit. 921 */ 922 if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) 923 single_display = false; 924 925 return single_display; 926 } 927 928 static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, 929 enum radeon_pm_state_type dpm_state) 930 { 931 int i; 932 struct radeon_ps *ps; 933 u32 ui_class; 934 bool single_display = radeon_dpm_single_display(rdev); 935 936 /* certain older asics have a separare 3D performance state, 937 * so try that first if the user selected performance 938 */ 939 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 940 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 941 /* balanced states don't exist at the moment */ 942 if (dpm_state == POWER_STATE_TYPE_BALANCED) 943 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 944 945 restart_search: 946 /* Pick the best power state based on current conditions */ 947 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 948 ps = &rdev->pm.dpm.ps[i]; 949 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 950 switch (dpm_state) { 951 /* user states */ 952 case POWER_STATE_TYPE_BATTERY: 953 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 954 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 955 if (single_display) 956 return ps; 957 } else 958 return ps; 959 } 960 break; 961 case POWER_STATE_TYPE_BALANCED: 962 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 963 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 964 if (single_display) 965 return ps; 966 } else 967 return ps; 968 } 969 break; 970 case POWER_STATE_TYPE_PERFORMANCE: 971 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 972 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 973 if (single_display) 974 return ps; 975 } else 976 return ps; 977 } 978 break; 979 /* internal states */ 980 case POWER_STATE_TYPE_INTERNAL_UVD: 981 if (rdev->pm.dpm.uvd_ps) 982 return rdev->pm.dpm.uvd_ps; 983 else 984 break; 985 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 986 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 987 return ps; 988 break; 989 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 990 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 991 return ps; 992 break; 993 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 994 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 995 return ps; 996 break; 997 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 998 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 999 return ps; 1000 break; 1001 case POWER_STATE_TYPE_INTERNAL_BOOT: 1002 return rdev->pm.dpm.boot_ps; 1003 case POWER_STATE_TYPE_INTERNAL_THERMAL: 1004 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 1005 return ps; 1006 break; 1007 case POWER_STATE_TYPE_INTERNAL_ACPI: 1008 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 1009 return ps; 1010 break; 1011 case POWER_STATE_TYPE_INTERNAL_ULV: 1012 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 1013 return ps; 1014 break; 1015 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1016 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 1017 return ps; 1018 break; 1019 default: 1020 break; 1021 } 1022 } 1023 /* use a fallback state if we didn't match */ 1024 switch (dpm_state) { 1025 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 1026 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1027 goto restart_search; 1028 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 1029 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 1030 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 1031 if (rdev->pm.dpm.uvd_ps) { 1032 return rdev->pm.dpm.uvd_ps; 1033 } else { 1034 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1035 goto restart_search; 1036 } 1037 case POWER_STATE_TYPE_INTERNAL_THERMAL: 1038 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 1039 goto restart_search; 1040 case POWER_STATE_TYPE_INTERNAL_ACPI: 1041 dpm_state = POWER_STATE_TYPE_BATTERY; 1042 goto restart_search; 1043 case POWER_STATE_TYPE_BATTERY: 1044 case POWER_STATE_TYPE_BALANCED: 1045 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1046 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1047 goto restart_search; 1048 default: 1049 break; 1050 } 1051 1052 return NULL; 1053 } 1054 1055 static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev) 1056 { 1057 int i; 1058 struct radeon_ps *ps; 1059 enum radeon_pm_state_type dpm_state; 1060 int ret; 1061 bool single_display = radeon_dpm_single_display(rdev); 1062 1063 /* if dpm init failed */ 1064 if (!rdev->pm.dpm_enabled) 1065 return; 1066 1067 if (rdev->pm.dpm.user_state != rdev->pm.dpm.state) { 1068 /* add other state override checks here */ 1069 if ((!rdev->pm.dpm.thermal_active) && 1070 (!rdev->pm.dpm.uvd_active)) 1071 rdev->pm.dpm.state = rdev->pm.dpm.user_state; 1072 } 1073 dpm_state = rdev->pm.dpm.state; 1074 1075 ps = radeon_dpm_pick_power_state(rdev, dpm_state); 1076 if (ps) 1077 rdev->pm.dpm.requested_ps = ps; 1078 else 1079 return; 1080 1081 /* no need to reprogram if nothing changed unless we are on BTC+ */ 1082 if (rdev->pm.dpm.current_ps == rdev->pm.dpm.requested_ps) { 1083 /* vce just modifies an existing state so force a change */ 1084 if (ps->vce_active != rdev->pm.dpm.vce_active) 1085 goto force; 1086 /* user has made a display change (such as timing) */ 1087 if (rdev->pm.dpm.single_display != single_display) 1088 goto force; 1089 if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) { 1090 /* for pre-BTC and APUs if the num crtcs changed but state is the same, 1091 * all we need to do is update the display configuration. 1092 */ 1093 if (rdev->pm.dpm.new_active_crtcs != rdev->pm.dpm.current_active_crtcs) { 1094 /* update display watermarks based on new power state */ 1095 radeon_bandwidth_update(rdev); 1096 /* update displays */ 1097 radeon_dpm_display_configuration_changed(rdev); 1098 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1099 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1100 } 1101 return; 1102 } else { 1103 /* for BTC+ if the num crtcs hasn't changed and state is the same, 1104 * nothing to do, if the num crtcs is > 1 and state is the same, 1105 * update display configuration. 1106 */ 1107 if (rdev->pm.dpm.new_active_crtcs == 1108 rdev->pm.dpm.current_active_crtcs) { 1109 return; 1110 } else { 1111 if ((rdev->pm.dpm.current_active_crtc_count > 1) && 1112 (rdev->pm.dpm.new_active_crtc_count > 1)) { 1113 /* update display watermarks based on new power state */ 1114 radeon_bandwidth_update(rdev); 1115 /* update displays */ 1116 radeon_dpm_display_configuration_changed(rdev); 1117 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1118 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1119 return; 1120 } 1121 } 1122 } 1123 } 1124 1125 force: 1126 if (radeon_dpm == 1) { 1127 printk("switching from power state:\n"); 1128 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.current_ps); 1129 printk("switching to power state:\n"); 1130 radeon_dpm_print_power_state(rdev, rdev->pm.dpm.requested_ps); 1131 } 1132 1133 down_write(&rdev->pm.mclk_lock); 1134 mutex_lock(&rdev->ring_lock); 1135 1136 /* update whether vce is active */ 1137 ps->vce_active = rdev->pm.dpm.vce_active; 1138 1139 ret = radeon_dpm_pre_set_power_state(rdev); 1140 if (ret) 1141 goto done; 1142 1143 /* update display watermarks based on new power state */ 1144 radeon_bandwidth_update(rdev); 1145 /* update displays */ 1146 radeon_dpm_display_configuration_changed(rdev); 1147 1148 /* wait for the rings to drain */ 1149 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1150 struct radeon_ring *ring = &rdev->ring[i]; 1151 if (ring->ready) 1152 radeon_fence_wait_empty(rdev, i); 1153 } 1154 1155 /* program the new power state */ 1156 radeon_dpm_set_power_state(rdev); 1157 1158 /* update current power state */ 1159 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps; 1160 1161 radeon_dpm_post_set_power_state(rdev); 1162 1163 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs; 1164 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count; 1165 rdev->pm.dpm.single_display = single_display; 1166 1167 if (rdev->asic->dpm.force_performance_level) { 1168 if (rdev->pm.dpm.thermal_active) { 1169 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 1170 /* force low perf level for thermal */ 1171 radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW); 1172 /* save the user's level */ 1173 rdev->pm.dpm.forced_level = level; 1174 } else { 1175 /* otherwise, user selected level */ 1176 radeon_dpm_force_performance_level(rdev, rdev->pm.dpm.forced_level); 1177 } 1178 } 1179 1180 done: 1181 mutex_unlock(&rdev->ring_lock); 1182 up_write(&rdev->pm.mclk_lock); 1183 } 1184 1185 void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable) 1186 { 1187 enum radeon_pm_state_type dpm_state; 1188 1189 if (rdev->asic->dpm.powergate_uvd) { 1190 mutex_lock(&rdev->pm.mutex); 1191 /* don't powergate anything if we 1192 have active but pause streams */ 1193 enable |= rdev->pm.dpm.sd > 0; 1194 enable |= rdev->pm.dpm.hd > 0; 1195 /* enable/disable UVD */ 1196 radeon_dpm_powergate_uvd(rdev, !enable); 1197 mutex_unlock(&rdev->pm.mutex); 1198 } else { 1199 if (enable) { 1200 mutex_lock(&rdev->pm.mutex); 1201 rdev->pm.dpm.uvd_active = true; 1202 /* disable this for now */ 1203 #if 0 1204 if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) 1205 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD; 1206 else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) 1207 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1208 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) 1209 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 1210 else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) 1211 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2; 1212 else 1213 #endif 1214 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD; 1215 rdev->pm.dpm.state = dpm_state; 1216 mutex_unlock(&rdev->pm.mutex); 1217 } else { 1218 mutex_lock(&rdev->pm.mutex); 1219 rdev->pm.dpm.uvd_active = false; 1220 mutex_unlock(&rdev->pm.mutex); 1221 } 1222 1223 radeon_pm_compute_clocks(rdev); 1224 } 1225 } 1226 1227 void radeon_dpm_enable_vce(struct radeon_device *rdev, bool enable) 1228 { 1229 if (enable) { 1230 mutex_lock(&rdev->pm.mutex); 1231 rdev->pm.dpm.vce_active = true; 1232 /* XXX select vce level based on ring/task */ 1233 rdev->pm.dpm.vce_level = RADEON_VCE_LEVEL_AC_ALL; 1234 mutex_unlock(&rdev->pm.mutex); 1235 } else { 1236 mutex_lock(&rdev->pm.mutex); 1237 rdev->pm.dpm.vce_active = false; 1238 mutex_unlock(&rdev->pm.mutex); 1239 } 1240 1241 radeon_pm_compute_clocks(rdev); 1242 } 1243 1244 static void radeon_pm_suspend_old(struct radeon_device *rdev) 1245 { 1246 mutex_lock(&rdev->pm.mutex); 1247 if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1248 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) 1249 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED; 1250 } 1251 mutex_unlock(&rdev->pm.mutex); 1252 1253 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1254 } 1255 1256 static void radeon_pm_suspend_dpm(struct radeon_device *rdev) 1257 { 1258 mutex_lock(&rdev->pm.mutex); 1259 /* disable dpm */ 1260 radeon_dpm_disable(rdev); 1261 /* reset the power state */ 1262 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1263 rdev->pm.dpm_enabled = false; 1264 mutex_unlock(&rdev->pm.mutex); 1265 } 1266 1267 void radeon_pm_suspend(struct radeon_device *rdev) 1268 { 1269 if (rdev->pm.pm_method == PM_METHOD_DPM) 1270 radeon_pm_suspend_dpm(rdev); 1271 else 1272 radeon_pm_suspend_old(rdev); 1273 } 1274 1275 static void radeon_pm_resume_old(struct radeon_device *rdev) 1276 { 1277 /* set up the default clocks if the MC ucode is loaded */ 1278 if ((rdev->family >= CHIP_BARTS) && 1279 (rdev->family <= CHIP_CAYMAN) && 1280 rdev->mc_fw) { 1281 if (rdev->pm.default_vddc) 1282 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1283 SET_VOLTAGE_TYPE_ASIC_VDDC); 1284 if (rdev->pm.default_vddci) 1285 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1286 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1287 if (rdev->pm.default_sclk) 1288 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1289 if (rdev->pm.default_mclk) 1290 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1291 } 1292 /* asic init will reset the default power state */ 1293 mutex_lock(&rdev->pm.mutex); 1294 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; 1295 rdev->pm.current_clock_mode_index = 0; 1296 rdev->pm.current_sclk = rdev->pm.default_sclk; 1297 rdev->pm.current_mclk = rdev->pm.default_mclk; 1298 if (rdev->pm.power_state) { 1299 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; 1300 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci; 1301 } 1302 if (rdev->pm.pm_method == PM_METHOD_DYNPM 1303 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) { 1304 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1305 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1306 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1307 } 1308 mutex_unlock(&rdev->pm.mutex); 1309 radeon_pm_compute_clocks(rdev); 1310 } 1311 1312 static void radeon_pm_resume_dpm(struct radeon_device *rdev) 1313 { 1314 int ret; 1315 1316 /* asic init will reset to the boot state */ 1317 mutex_lock(&rdev->pm.mutex); 1318 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1319 radeon_dpm_setup_asic(rdev); 1320 ret = radeon_dpm_enable(rdev); 1321 mutex_unlock(&rdev->pm.mutex); 1322 if (ret) 1323 goto dpm_resume_fail; 1324 rdev->pm.dpm_enabled = true; 1325 return; 1326 1327 dpm_resume_fail: 1328 DRM_ERROR("radeon: dpm resume failed\n"); 1329 if ((rdev->family >= CHIP_BARTS) && 1330 (rdev->family <= CHIP_CAYMAN) && 1331 rdev->mc_fw) { 1332 if (rdev->pm.default_vddc) 1333 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1334 SET_VOLTAGE_TYPE_ASIC_VDDC); 1335 if (rdev->pm.default_vddci) 1336 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1337 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1338 if (rdev->pm.default_sclk) 1339 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1340 if (rdev->pm.default_mclk) 1341 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1342 } 1343 } 1344 1345 void radeon_pm_resume(struct radeon_device *rdev) 1346 { 1347 if (rdev->pm.pm_method == PM_METHOD_DPM) 1348 radeon_pm_resume_dpm(rdev); 1349 else 1350 radeon_pm_resume_old(rdev); 1351 } 1352 1353 static int radeon_pm_init_old(struct radeon_device *rdev) 1354 { 1355 int ret; 1356 1357 rdev->pm.profile = PM_PROFILE_DEFAULT; 1358 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1359 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1360 rdev->pm.dynpm_can_upclock = true; 1361 rdev->pm.dynpm_can_downclock = true; 1362 rdev->pm.default_sclk = rdev->clock.default_sclk; 1363 rdev->pm.default_mclk = rdev->clock.default_mclk; 1364 rdev->pm.current_sclk = rdev->clock.default_sclk; 1365 rdev->pm.current_mclk = rdev->clock.default_mclk; 1366 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1367 1368 if (rdev->bios) { 1369 if (rdev->is_atom_bios) 1370 radeon_atombios_get_power_modes(rdev); 1371 else 1372 radeon_combios_get_power_modes(rdev); 1373 radeon_pm_print_states(rdev); 1374 radeon_pm_init_profile(rdev); 1375 /* set up the default clocks if the MC ucode is loaded */ 1376 if ((rdev->family >= CHIP_BARTS) && 1377 (rdev->family <= CHIP_CAYMAN) && 1378 rdev->mc_fw) { 1379 if (rdev->pm.default_vddc) 1380 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1381 SET_VOLTAGE_TYPE_ASIC_VDDC); 1382 if (rdev->pm.default_vddci) 1383 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1384 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1385 if (rdev->pm.default_sclk) 1386 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1387 if (rdev->pm.default_mclk) 1388 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1389 } 1390 } 1391 1392 /* set up the internal thermal sensor if applicable */ 1393 ret = radeon_hwmon_init(rdev); 1394 if (ret) 1395 return ret; 1396 1397 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler); 1398 1399 if (rdev->pm.num_power_states > 1) { 1400 radeon_debugfs_pm_init(rdev); 1401 DRM_INFO("radeon: power management initialized\n"); 1402 } 1403 1404 return 0; 1405 } 1406 1407 static void radeon_dpm_print_power_states(struct radeon_device *rdev) 1408 { 1409 int i; 1410 1411 for (i = 0; i < rdev->pm.dpm.num_ps; i++) { 1412 printk("== power state %d ==\n", i); 1413 radeon_dpm_print_power_state(rdev, &rdev->pm.dpm.ps[i]); 1414 } 1415 } 1416 1417 static int radeon_pm_init_dpm(struct radeon_device *rdev) 1418 { 1419 int ret; 1420 1421 /* default to balanced state */ 1422 rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; 1423 rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; 1424 rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO; 1425 rdev->pm.default_sclk = rdev->clock.default_sclk; 1426 rdev->pm.default_mclk = rdev->clock.default_mclk; 1427 rdev->pm.current_sclk = rdev->clock.default_sclk; 1428 rdev->pm.current_mclk = rdev->clock.default_mclk; 1429 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; 1430 1431 if (rdev->bios && rdev->is_atom_bios) 1432 radeon_atombios_get_power_modes(rdev); 1433 else 1434 return -EINVAL; 1435 1436 /* set up the internal thermal sensor if applicable */ 1437 ret = radeon_hwmon_init(rdev); 1438 if (ret) 1439 return ret; 1440 1441 INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler); 1442 mutex_lock(&rdev->pm.mutex); 1443 radeon_dpm_init(rdev); 1444 rdev->pm.dpm.current_ps = rdev->pm.dpm.requested_ps = rdev->pm.dpm.boot_ps; 1445 if (radeon_dpm == 1) 1446 radeon_dpm_print_power_states(rdev); 1447 radeon_dpm_setup_asic(rdev); 1448 ret = radeon_dpm_enable(rdev); 1449 mutex_unlock(&rdev->pm.mutex); 1450 if (ret) 1451 goto dpm_failed; 1452 rdev->pm.dpm_enabled = true; 1453 1454 radeon_debugfs_pm_init(rdev); 1455 1456 DRM_INFO("radeon: dpm initialized\n"); 1457 1458 return 0; 1459 1460 dpm_failed: 1461 rdev->pm.dpm_enabled = false; 1462 if ((rdev->family >= CHIP_BARTS) && 1463 (rdev->family <= CHIP_CAYMAN) && 1464 rdev->mc_fw) { 1465 if (rdev->pm.default_vddc) 1466 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc, 1467 SET_VOLTAGE_TYPE_ASIC_VDDC); 1468 if (rdev->pm.default_vddci) 1469 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci, 1470 SET_VOLTAGE_TYPE_ASIC_VDDCI); 1471 if (rdev->pm.default_sclk) 1472 radeon_set_engine_clock(rdev, rdev->pm.default_sclk); 1473 if (rdev->pm.default_mclk) 1474 radeon_set_memory_clock(rdev, rdev->pm.default_mclk); 1475 } 1476 DRM_ERROR("radeon: dpm initialization failed\n"); 1477 return ret; 1478 } 1479 1480 struct radeon_dpm_quirk { 1481 u32 chip_vendor; 1482 u32 chip_device; 1483 u32 subsys_vendor; 1484 u32 subsys_device; 1485 }; 1486 1487 /* cards with dpm stability problems */ 1488 static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { 1489 /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ 1490 { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, 1491 /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ 1492 { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, 1493 { 0, 0, 0, 0 }, 1494 }; 1495 1496 int radeon_pm_init(struct radeon_device *rdev) 1497 { 1498 struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; 1499 bool disable_dpm = false; 1500 1501 /* Apply dpm quirks */ 1502 while (p && p->chip_device != 0) { 1503 if (rdev->pdev->vendor == p->chip_vendor && 1504 rdev->pdev->device == p->chip_device && 1505 rdev->pdev->subsystem_vendor == p->subsys_vendor && 1506 rdev->pdev->subsystem_device == p->subsys_device) { 1507 disable_dpm = true; 1508 break; 1509 } 1510 ++p; 1511 } 1512 1513 /* enable dpm on rv6xx+ */ 1514 switch (rdev->family) { 1515 case CHIP_RV610: 1516 case CHIP_RV630: 1517 case CHIP_RV620: 1518 case CHIP_RV635: 1519 case CHIP_RV670: 1520 case CHIP_RS780: 1521 case CHIP_RS880: 1522 case CHIP_RV770: 1523 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1524 if (!rdev->rlc_fw) 1525 rdev->pm.pm_method = PM_METHOD_PROFILE; 1526 else if ((rdev->family >= CHIP_RV770) && 1527 (!(rdev->flags & RADEON_IS_IGP)) && 1528 (!rdev->smc_fw)) 1529 rdev->pm.pm_method = PM_METHOD_PROFILE; 1530 else if (radeon_dpm == 1) 1531 rdev->pm.pm_method = PM_METHOD_DPM; 1532 else 1533 rdev->pm.pm_method = PM_METHOD_PROFILE; 1534 break; 1535 case CHIP_RV730: 1536 case CHIP_RV710: 1537 case CHIP_RV740: 1538 case CHIP_CEDAR: 1539 case CHIP_REDWOOD: 1540 case CHIP_JUNIPER: 1541 case CHIP_CYPRESS: 1542 case CHIP_HEMLOCK: 1543 case CHIP_PALM: 1544 case CHIP_SUMO: 1545 case CHIP_SUMO2: 1546 case CHIP_BARTS: 1547 case CHIP_TURKS: 1548 case CHIP_CAICOS: 1549 case CHIP_CAYMAN: 1550 case CHIP_ARUBA: 1551 case CHIP_TAHITI: 1552 case CHIP_PITCAIRN: 1553 case CHIP_VERDE: 1554 case CHIP_OLAND: 1555 case CHIP_HAINAN: 1556 case CHIP_BONAIRE: 1557 case CHIP_KABINI: 1558 case CHIP_KAVERI: 1559 case CHIP_HAWAII: 1560 case CHIP_MULLINS: 1561 /* DPM requires the RLC, RV770+ dGPU requires SMC */ 1562 if (!rdev->rlc_fw) 1563 rdev->pm.pm_method = PM_METHOD_PROFILE; 1564 else if ((rdev->family >= CHIP_RV770) && 1565 (!(rdev->flags & RADEON_IS_IGP)) && 1566 (!rdev->smc_fw)) 1567 rdev->pm.pm_method = PM_METHOD_PROFILE; 1568 else if (disable_dpm && (radeon_dpm == -1)) 1569 rdev->pm.pm_method = PM_METHOD_PROFILE; 1570 else if (radeon_dpm == 0) 1571 rdev->pm.pm_method = PM_METHOD_PROFILE; 1572 else 1573 rdev->pm.pm_method = PM_METHOD_DPM; 1574 break; 1575 default: 1576 /* default to profile method */ 1577 rdev->pm.pm_method = PM_METHOD_PROFILE; 1578 break; 1579 } 1580 1581 if (rdev->pm.pm_method == PM_METHOD_DPM) 1582 return radeon_pm_init_dpm(rdev); 1583 else 1584 return radeon_pm_init_old(rdev); 1585 } 1586 1587 int radeon_pm_late_init(struct radeon_device *rdev) 1588 { 1589 int ret = 0; 1590 1591 if (rdev->pm.pm_method == PM_METHOD_DPM) { 1592 if (rdev->pm.dpm_enabled) { 1593 if (!rdev->pm.sysfs_initialized) { 1594 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state); 1595 if (ret) 1596 DRM_ERROR("failed to create device file for dpm state\n"); 1597 ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1598 if (ret) 1599 DRM_ERROR("failed to create device file for dpm state\n"); 1600 /* XXX: these are noops for dpm but are here for backwards compat */ 1601 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1602 if (ret) 1603 DRM_ERROR("failed to create device file for power profile\n"); 1604 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1605 if (ret) 1606 DRM_ERROR("failed to create device file for power method\n"); 1607 rdev->pm.sysfs_initialized = true; 1608 } 1609 1610 mutex_lock(&rdev->pm.mutex); 1611 ret = radeon_dpm_late_enable(rdev); 1612 mutex_unlock(&rdev->pm.mutex); 1613 if (ret) { 1614 rdev->pm.dpm_enabled = false; 1615 DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); 1616 } else { 1617 /* set the dpm state for PX since there won't be 1618 * a modeset to call this. 1619 */ 1620 radeon_pm_compute_clocks(rdev); 1621 } 1622 } 1623 } else { 1624 if ((rdev->pm.num_power_states > 1) && 1625 (!rdev->pm.sysfs_initialized)) { 1626 /* where's the best place to put these? */ 1627 ret = device_create_file(rdev->dev, &dev_attr_power_profile); 1628 if (ret) 1629 DRM_ERROR("failed to create device file for power profile\n"); 1630 ret = device_create_file(rdev->dev, &dev_attr_power_method); 1631 if (ret) 1632 DRM_ERROR("failed to create device file for power method\n"); 1633 else 1634 rdev->pm.sysfs_initialized = true; 1635 } 1636 } 1637 return ret; 1638 } 1639 1640 static void radeon_pm_fini_old(struct radeon_device *rdev) 1641 { 1642 if (rdev->pm.num_power_states > 1) { 1643 mutex_lock(&rdev->pm.mutex); 1644 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1645 rdev->pm.profile = PM_PROFILE_DEFAULT; 1646 radeon_pm_update_profile(rdev); 1647 radeon_pm_set_clocks(rdev); 1648 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1649 /* reset default clocks */ 1650 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED; 1651 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1652 radeon_pm_set_clocks(rdev); 1653 } 1654 mutex_unlock(&rdev->pm.mutex); 1655 1656 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work); 1657 1658 device_remove_file(rdev->dev, &dev_attr_power_profile); 1659 device_remove_file(rdev->dev, &dev_attr_power_method); 1660 } 1661 1662 radeon_hwmon_fini(rdev); 1663 kfree(rdev->pm.power_state); 1664 } 1665 1666 static void radeon_pm_fini_dpm(struct radeon_device *rdev) 1667 { 1668 if (rdev->pm.num_power_states > 1) { 1669 mutex_lock(&rdev->pm.mutex); 1670 radeon_dpm_disable(rdev); 1671 mutex_unlock(&rdev->pm.mutex); 1672 1673 device_remove_file(rdev->dev, &dev_attr_power_dpm_state); 1674 device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level); 1675 /* XXX backwards compat */ 1676 device_remove_file(rdev->dev, &dev_attr_power_profile); 1677 device_remove_file(rdev->dev, &dev_attr_power_method); 1678 } 1679 radeon_dpm_fini(rdev); 1680 1681 radeon_hwmon_fini(rdev); 1682 kfree(rdev->pm.power_state); 1683 } 1684 1685 void radeon_pm_fini(struct radeon_device *rdev) 1686 { 1687 if (rdev->pm.pm_method == PM_METHOD_DPM) 1688 radeon_pm_fini_dpm(rdev); 1689 else 1690 radeon_pm_fini_old(rdev); 1691 } 1692 1693 static void radeon_pm_compute_clocks_old(struct radeon_device *rdev) 1694 { 1695 struct drm_device *ddev = rdev_to_drm(rdev); 1696 struct drm_crtc *crtc; 1697 struct radeon_crtc *radeon_crtc; 1698 1699 if (rdev->pm.num_power_states < 2) 1700 return; 1701 1702 mutex_lock(&rdev->pm.mutex); 1703 1704 rdev->pm.active_crtcs = 0; 1705 rdev->pm.active_crtc_count = 0; 1706 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 1707 list_for_each_entry(crtc, 1708 &ddev->mode_config.crtc_list, head) { 1709 radeon_crtc = to_radeon_crtc(crtc); 1710 if (radeon_crtc->enabled) { 1711 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 1712 rdev->pm.active_crtc_count++; 1713 } 1714 } 1715 } 1716 1717 if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 1718 radeon_pm_update_profile(rdev); 1719 radeon_pm_set_clocks(rdev); 1720 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) { 1721 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) { 1722 if (rdev->pm.active_crtc_count > 1) { 1723 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1724 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1725 1726 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED; 1727 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT; 1728 radeon_pm_get_dynpm_state(rdev); 1729 radeon_pm_set_clocks(rdev); 1730 1731 DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); 1732 } 1733 } else if (rdev->pm.active_crtc_count == 1) { 1734 /* TODO: Increase clocks if needed for current mode */ 1735 1736 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) { 1737 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1738 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK; 1739 radeon_pm_get_dynpm_state(rdev); 1740 radeon_pm_set_clocks(rdev); 1741 1742 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1743 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1744 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) { 1745 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; 1746 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1747 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1748 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); 1749 } 1750 } else { /* count == 0 */ 1751 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { 1752 cancel_delayed_work(&rdev->pm.dynpm_idle_work); 1753 1754 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM; 1755 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM; 1756 radeon_pm_get_dynpm_state(rdev); 1757 radeon_pm_set_clocks(rdev); 1758 } 1759 } 1760 } 1761 } 1762 1763 mutex_unlock(&rdev->pm.mutex); 1764 } 1765 1766 static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev) 1767 { 1768 struct drm_device *ddev = rdev_to_drm(rdev); 1769 struct drm_crtc *crtc; 1770 struct radeon_crtc *radeon_crtc; 1771 struct radeon_connector *radeon_connector; 1772 1773 if (!rdev->pm.dpm_enabled) 1774 return; 1775 1776 mutex_lock(&rdev->pm.mutex); 1777 1778 /* update active crtc counts */ 1779 rdev->pm.dpm.new_active_crtcs = 0; 1780 rdev->pm.dpm.new_active_crtc_count = 0; 1781 rdev->pm.dpm.high_pixelclock_count = 0; 1782 if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) { 1783 list_for_each_entry(crtc, 1784 &ddev->mode_config.crtc_list, head) { 1785 radeon_crtc = to_radeon_crtc(crtc); 1786 if (crtc->enabled) { 1787 rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id); 1788 rdev->pm.dpm.new_active_crtc_count++; 1789 if (!radeon_crtc->connector) 1790 continue; 1791 1792 radeon_connector = to_radeon_connector(radeon_crtc->connector); 1793 if (radeon_connector->pixelclock_for_modeset > 297000) 1794 rdev->pm.dpm.high_pixelclock_count++; 1795 } 1796 } 1797 } 1798 1799 /* update battery/ac status */ 1800 if (power_supply_is_system_supplied() > 0) 1801 rdev->pm.dpm.ac_power = true; 1802 else 1803 rdev->pm.dpm.ac_power = false; 1804 1805 radeon_dpm_change_power_state_locked(rdev); 1806 1807 mutex_unlock(&rdev->pm.mutex); 1808 1809 } 1810 1811 void radeon_pm_compute_clocks(struct radeon_device *rdev) 1812 { 1813 if (rdev->pm.pm_method == PM_METHOD_DPM) 1814 radeon_pm_compute_clocks_dpm(rdev); 1815 else 1816 radeon_pm_compute_clocks_old(rdev); 1817 } 1818 1819 static bool radeon_pm_in_vbl(struct radeon_device *rdev) 1820 { 1821 int crtc, vpos, hpos, vbl_status; 1822 bool in_vbl = true; 1823 1824 /* Iterate over all active crtc's. All crtc's must be in vblank, 1825 * otherwise return in_vbl == false. 1826 */ 1827 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { 1828 if (rdev->pm.active_crtcs & (1 << crtc)) { 1829 vbl_status = radeon_get_crtc_scanoutpos(rdev_to_drm(rdev), 1830 crtc, 1831 USE_REAL_VBLANKSTART, 1832 &vpos, &hpos, NULL, NULL, 1833 &rdev->mode_info.crtcs[crtc]->base.hwmode); 1834 if ((vbl_status & DRM_SCANOUTPOS_VALID) && 1835 !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK)) 1836 in_vbl = false; 1837 } 1838 } 1839 1840 return in_vbl; 1841 } 1842 1843 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) 1844 { 1845 u32 stat_crtc = 0; 1846 bool in_vbl = radeon_pm_in_vbl(rdev); 1847 1848 if (!in_vbl) 1849 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, 1850 finish ? "exit" : "entry"); 1851 return in_vbl; 1852 } 1853 1854 static void radeon_dynpm_idle_work_handler(struct work_struct *work) 1855 { 1856 struct radeon_device *rdev; 1857 1858 rdev = container_of(work, struct radeon_device, 1859 pm.dynpm_idle_work.work); 1860 1861 mutex_lock(&rdev->pm.mutex); 1862 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) { 1863 int not_processed = 0; 1864 int i; 1865 1866 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1867 struct radeon_ring *ring = &rdev->ring[i]; 1868 1869 if (ring->ready) { 1870 not_processed += radeon_fence_count_emitted(rdev, i); 1871 if (not_processed >= 3) 1872 break; 1873 } 1874 } 1875 1876 if (not_processed >= 3) { /* should upclock */ 1877 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) { 1878 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1879 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1880 rdev->pm.dynpm_can_upclock) { 1881 rdev->pm.dynpm_planned_action = 1882 DYNPM_ACTION_UPCLOCK; 1883 rdev->pm.dynpm_action_timeout = jiffies + 1884 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1885 } 1886 } else if (not_processed == 0) { /* should downclock */ 1887 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) { 1888 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE; 1889 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE && 1890 rdev->pm.dynpm_can_downclock) { 1891 rdev->pm.dynpm_planned_action = 1892 DYNPM_ACTION_DOWNCLOCK; 1893 rdev->pm.dynpm_action_timeout = jiffies + 1894 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); 1895 } 1896 } 1897 1898 /* Note, radeon_pm_set_clocks is called with static_switch set 1899 * to false since we want to wait for vbl to avoid flicker. 1900 */ 1901 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE && 1902 time_after(jiffies, rdev->pm.dynpm_action_timeout)) { 1903 radeon_pm_get_dynpm_state(rdev); 1904 radeon_pm_set_clocks(rdev); 1905 } 1906 1907 schedule_delayed_work(&rdev->pm.dynpm_idle_work, 1908 msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); 1909 } 1910 mutex_unlock(&rdev->pm.mutex); 1911 } 1912 1913 /* 1914 * Debugfs info 1915 */ 1916 #if defined(CONFIG_DEBUG_FS) 1917 1918 static int radeon_debugfs_pm_info_show(struct seq_file *m, void *unused) 1919 { 1920 struct radeon_device *rdev = m->private; 1921 struct drm_device *ddev = rdev_to_drm(rdev); 1922 1923 if ((rdev->flags & RADEON_IS_PX) && 1924 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { 1925 seq_printf(m, "PX asic powered off\n"); 1926 } else if (rdev->pm.dpm_enabled) { 1927 mutex_lock(&rdev->pm.mutex); 1928 if (rdev->asic->dpm.debugfs_print_current_performance_level) 1929 radeon_dpm_debugfs_print_current_performance_level(rdev, m); 1930 else 1931 seq_printf(m, "Debugfs support not implemented for this asic\n"); 1932 mutex_unlock(&rdev->pm.mutex); 1933 } else { 1934 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); 1935 /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ 1936 if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) 1937 seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); 1938 else 1939 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); 1940 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); 1941 if (rdev->asic->pm.get_memory_clock) 1942 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); 1943 if (rdev->pm.current_vddc) 1944 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc); 1945 if (rdev->asic->pm.get_pcie_lanes) 1946 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); 1947 } 1948 1949 return 0; 1950 } 1951 1952 DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_pm_info); 1953 #endif 1954 1955 static void radeon_debugfs_pm_init(struct radeon_device *rdev) 1956 { 1957 #if defined(CONFIG_DEBUG_FS) 1958 struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; 1959 1960 debugfs_create_file("radeon_pm_info", 0444, root, rdev, 1961 &radeon_debugfs_pm_info_fops); 1962 1963 #endif 1964 } 1965